[Patch V3 0/3] Enable irqs when waiting for rwlocks - Kernel

This is a discussion of [Patch V3 0/3] Enable irqs when waiting for rwlocks, from the Linux kernel mailing list. New in V3: * Handle rearrangement of some arch's include/asm directories. New in V2: * get rid of ugly #ifdef's in kernel/spinlock.h * convert __raw_{read|write}_lock_flags to an inline func. SGI has observed that on large systems, interrupts are not serviced ...

+ Reply to Thread
Results 1 to 4 of 4

Thread: [Patch V3 0/3] Enable irqs when waiting for rwlocks

  1. [Patch V3 0/3] Enable irqs when waiting for rwlocks

    New in V3:
    * Handle rearrangement of some arch's include/asm directories.

    New in V2:
    * get rid of ugly #ifdef's in kernel/spinlock.h
    * convert __raw_{read|write}_lock_flags to an inline func

    SGI has observed that on large systems, interrupts are not serviced for
    a long period of time when waiting for a rwlock. The following patch
    series re-enables irqs while waiting for the lock, resembling the code
    which is already there for spinlocks.

    I only made the ia64 version, because the patch adds some overhead to
    the fast path. I assume there is currently no demand to have this for
    other architectures, because the systems are not so large. Of course,
    the possibility to implement raw_{read|write}_lock_flags for any
    architecture is still there.

    Robin Holt
    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  2. [Patch V3 1/3] Factor out #ifdefs from kernel/spinlock.c to LOCK_CONTENDED_FLAGS

    The new macro LOCK_CONTENDED_FLAGS expands to the correct implementation
    depending on the config options, so that IRQ's are re-enabled when
    possible, but they remain disabled if CONFIG_LOCKDEP is set.

    Signed-off-by: Petr Tesarik
    Signed-off-by: Robin Holt
    ---
    include/linux/lockdep.h | 17 +++++++++++++++++
    kernel/spinlock.c | 12 ++----------
    2 files changed, 19 insertions(+), 10 deletions(-)

    Index: rwlock/include/linux/lockdep.h
    ===================================================================
    --- rwlock.orig/include/linux/lockdep.h 2008-11-04 05:54:18.456411408 -0600
    +++ rwlock/include/linux/lockdep.h 2008-11-04 05:55:43.335302610 -0600
    @@ -376,6 +376,23 @@ do { \

    #endif /* CONFIG_LOCK_STAT */

    +#ifdef CONFIG_LOCKDEP
    +
    +/*
    + * On lockdep we dont want the hand-coded irq-enable of
    + * _raw_*_lock_flags() code, because lockdep assumes
    + * that interrupts are not re-enabled during lock-acquire:
    + */
    +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
    + LOCK_CONTENDED((_lock), (try), (lock))
    +
    +#else /* CONFIG_LOCKDEP */
    +
    +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
    + lockfl((_lock), (flags))
    +
    +#endif /* CONFIG_LOCKDEP */
    +
    #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
    extern void early_init_irq_lock_class(void);
    #else
    Index: rwlock/kernel/spinlock.c
    ===================================================================
    --- rwlock.orig/kernel/spinlock.c 2008-11-04 05:54:18.456411408 -0600
    +++ rwlock/kernel/spinlock.c 2008-11-04 06:00:35.504744806 -0600
    @@ -299,16 +299,8 @@ unsigned long __lockfunc _spin_lock_irqs
    local_irq_save(flags);
    preempt_disable();
    spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
    - /*
    - * On lockdep we dont want the hand-coded irq-enable of
    - * _raw_spin_lock_flags() code, because lockdep assumes
    - * that interrupts are not re-enabled during lock-acquire:
    - */
    -#ifdef CONFIG_LOCKDEP
    - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
    -#else
    - _raw_spin_lock_flags(lock, &flags);
    -#endif
    + LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
    + _raw_spin_lock_flags, &flags);
    return flags;
    }
    EXPORT_SYMBOL(_spin_lock_irqsave_nested);

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  3. [Patch V3 3/3] ia64: implement interrupt-enabling rwlocks

    Implement __raw_read_lock_flags and __raw_write_lock_flags for the
    ia64 architecture.

    Signed-off-by: Petr Tesarik
    Signed-off-by: Robin Holt
    ---
    spinlock.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++++-------------
    1 file changed, 63 insertions(+), 17 deletions(-)

    Index: rwlock/arch/ia64/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/ia64/include/asm/spinlock.h 2008-11-04 06:01:29.251624869 -0600
    +++ rwlock/arch/ia64/include/asm/spinlock.h 2008-11-04 06:01:32.500040620 -0600
    @@ -120,6 +120,38 @@ do { \
    #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
    #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)

    +#ifdef ASM_SUPPORTED
    +
    +static __always_inline void
    +__raw_read_lock_flags(raw_spinlock_t *lock, unsigned long flags)
    +{
    + __asm__ __volatile__ (
    + "tbit.nz p6, p0 = %1,%2\n"
    + "br.few 3f\n"
    + "1:\n"
    + "fetchadd4.rel r2 = [%0], -1;;\n"
    + "(p6) ssm psr.i\n"
    + "2:\n"
    + "hint @pause\n"
    + "ld4 r2 = [%0];;\n"
    + "cmp4.lt p7,p0 = r2, r0\n"
    + "(p7) br.cond.spnt.few 2b\n"
    + "(p6) rsm psr.i\n"
    + ";;\n"
    + "3:\n"
    + "fetchadd4.acq r2 = [%0], 1;;\n"
    + "cmp4.lt p7,p0 = r2, r0\n"
    + "(p7) br.cond.spnt.few 1b\n"
    + : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
    + : "p6", "p7", "r2", "memory");
    +}
    +
    +#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
    +
    +#else /* !ASM_SUPPORTED */
    +
    +#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
    +
    #define __raw_read_lock(rw) \
    do { \
    raw_rwlock_t *__read_lock_ptr = (rw); \
    @@ -131,6 +163,8 @@ do { \
    } \
    } while (0)

    +#endif /* !ASM_SUPPORTED */
    +
    #define __raw_read_unlock(rw) \
    do { \
    raw_rwlock_t *__read_lock_ptr = (rw); \
    @@ -138,20 +172,33 @@ do { \
    } while (0)

    #ifdef ASM_SUPPORTED
    -#define __raw_write_lock(rw) \
    -do { \
    - __asm__ __volatile__ ( \
    - "mov ar.ccv = r0\n" \
    - "dep r29 = -1, r0, 31, 1;;\n" \
    - "1:\n" \
    - "ld4 r2 = [%0];;\n" \
    - "cmp4.eq p0,p7 = r0,r2\n" \
    - "(p7) br.cond.spnt.few 1b \n" \
    - "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
    - "cmp4.eq p0,p7 = r0, r2\n" \
    - "(p7) br.cond.spnt.few 1b;;\n" \
    - :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
    -} while(0)
    +
    +static __always_inline void
    +__raw_write_lock_flags(raw_spinlock_t *lock, unsigned long flags)
    +{
    + __asm__ __volatile__ (
    + "tbit.nz p6, p0 = %1, %2\n"
    + "mov ar.ccv = r0\n"
    + "dep r29 = -1, r0, 31, 1\n"
    + "br.few 3f;;\n"
    + "1:\n"
    + "(p6) ssm psr.i\n"
    + "2:\n"
    + "hint @pause\n"
    + "ld4 r2 = [%0];;\n"
    + "cmp4.eq p0,p7 = r0, r2\n"
    + "(p7) br.cond.spnt.few 2b\n"
    + "(p6) rsm psr.i\n"
    + ";;\n"
    + "3:\n"
    + "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
    + "cmp4.eq p0,p7 = r0, r2\n"
    + "(p7) br.cond.spnt.few 1b;;\n"
    + : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
    + : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
    +}
    +
    +#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)

    #define __raw_write_trylock(rw) \
    ({ \
    @@ -174,6 +221,8 @@ static inline void __raw_write_unlock(ra

    #else /* !ASM_SUPPORTED */

    +#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
    +
    #define __raw_write_lock(l) \
    ({ \
    __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
    @@ -213,9 +262,6 @@ static inline int __raw_read_trylock(raw
    return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
    }

    -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    -
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  4. [Patch V3 2/3] Allow rwlocks to re-enable interrupts

    Pass the original flags to rwlock arch-code, so that it can re-enable
    interrupts if implemented for that architecture.

    Initially, make __raw_read_lock_flags and __raw_write_lock_flags
    stubs which just do the same thing as non-flags variants.

    Signed-off-by: Petr Tesarik
    Signed-off-by: Robin Holt
    ---
    arch/alpha/include/asm/spinlock.h | 3 +++
    arch/arm/include/asm/spinlock.h | 3 +++
    arch/cris/include/arch-v32/arch/spinlock.h | 2 ++
    arch/ia64/include/asm/spinlock.h | 3 +++
    arch/mips/include/asm/spinlock.h | 2 ++
    arch/parisc/include/asm/spinlock.h | 3 +++
    arch/powerpc/include/asm/spinlock.h | 3 +++
    arch/s390/include/asm/spinlock.h | 3 +++
    arch/sh/include/asm/spinlock.h | 3 +++
    arch/sparc/include/asm/spinlock_32.h | 2 ++
    arch/sparc/include/asm/spinlock_64.h | 2 ++
    arch/x86/include/asm/spinlock.h | 3 +++
    include/asm-m32r/spinlock.h | 3 +++
    include/linux/spinlock.h | 6 ++++++
    kernel/spinlock.c | 6 ++++--
    15 files changed, 45 insertions(+), 2 deletions(-)

    Index: rwlock/arch/alpha/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/alpha/include/asm/spinlock.h 2008-11-04 05:54:18.340396518 -0600
    +++ rwlock/arch/alpha/include/asm/spinlock.h 2008-11-04 05:58:47.862958623 -0600
    @@ -166,6 +166,9 @@ static inline void __raw_write_unlock(ra
    lock->lock = 0;
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/arch/arm/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/arm/include/asm/spinlock.h 2008-11-04 05:54:18.340396518 -0600
    +++ rwlock/arch/arm/include/asm/spinlock.h 2008-11-04 05:58:47.874960161 -0600
    @@ -217,6 +217,9 @@ static inline int __raw_read_trylock(raw
    /* read_can_lock - would read_trylock() succeed? */
    #define __raw_read_can_lock(x) ((x)->lock < 0x80000000)

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/arch/ia64/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/ia64/include/asm/spinlock.h 2008-11-04 05:54:18.340396518 -0600
    +++ rwlock/arch/ia64/include/asm/spinlock.h 2008-11-04 05:58:47.894962723 -0600
    @@ -213,6 +213,9 @@ static inline int __raw_read_trylock(raw
    return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/arch/mips/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/mips/include/asm/spinlock.h 2008-11-04 05:54:18.340396518 -0600
    +++ rwlock/arch/mips/include/asm/spinlock.h 2008-11-04 05:58:47.918965799 -0600
    @@ -479,6 +479,8 @@ static inline int __raw_write_trylock(ra
    return ret;
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    Index: rwlock/arch/powerpc/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/powerpc/include/asm/spinlock.h 2008-11-04 05:54:18.340396518 -0600
    +++ rwlock/arch/powerpc/include/asm/spinlock.h 2008-11-04 05:58:47.942968874 -0600
    @@ -287,6 +287,9 @@ static inline void __raw_write_unlock(ra
    rw->lock = 0;
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) __spin_yield(lock)
    #define _raw_read_relax(lock) __rw_yield(lock)
    #define _raw_write_relax(lock) __rw_yield(lock)
    Index: rwlock/arch/s390/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/s390/include/asm/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/s390/include/asm/spinlock.h 2008-11-04 05:58:47.966971949 -0600
    @@ -172,6 +172,9 @@ static inline int __raw_write_trylock(ra
    return _raw_write_trylock_retry(rw);
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()

    Index: rwlock/arch/sh/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/sh/include/asm/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/sh/include/asm/spinlock.h 2008-11-04 05:58:47.990975024 -0600
    @@ -216,6 +216,9 @@ static inline int __raw_write_trylock(ra
    return (oldval > (RW_LOCK_BIAS - 1));
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/arch/sparc/include/asm/spinlock_32.h
    ===================================================================
    --- rwlock.orig/arch/sparc/include/asm/spinlock_32.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/sparc/include/asm/spinlock_32.h 2008-11-04 05:58:48.014978100 -0600
    @@ -177,6 +177,8 @@ static inline int __read_trylock(raw_rwl
    #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)

    #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
    +#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
    +#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)

    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    Index: rwlock/arch/sparc/include/asm/spinlock_64.h
    ===================================================================
    --- rwlock.orig/arch/sparc/include/asm/spinlock_64.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/sparc/include/asm/spinlock_64.h 2008-11-04 05:58:48.034980662 -0600
    @@ -230,9 +230,11 @@ static int inline __write_trylock(raw_rw
    }

    #define __raw_read_lock(p) __read_lock(p)
    +#define __raw_read_lock_flags(p, f) __read_lock(p)
    #define __raw_read_trylock(p) __read_trylock(p)
    #define __raw_read_unlock(p) __read_unlock(p)
    #define __raw_write_lock(p) __write_lock(p)
    +#define __raw_write_lock_flags(p, f) __write_lock(p)
    #define __raw_write_unlock(p) __write_unlock(p)
    #define __raw_write_trylock(p) __write_trylock(p)

    Index: rwlock/arch/cris/include/arch-v32/arch/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/cris/include/arch-v32/arch/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/cris/include/arch-v32/arch/spinlock.h 2008-11-04 05:58:48.050982713 -0600
    @@ -121,6 +121,8 @@ static inline int __raw_write_trylock(r
    return 1;
    }

    +#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
    +#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    Index: rwlock/include/asm-m32r/spinlock.h
    ===================================================================
    --- rwlock.orig/include/asm-m32r/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/include/asm-m32r/spinlock.h 2008-11-04 05:58:48.074985788 -0600
    @@ -316,6 +316,9 @@ static inline int __raw_write_trylock(ra
    return 0;
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/arch/parisc/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/parisc/include/asm/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/parisc/include/asm/spinlock.h 2008-11-04 05:58:48.098988863 -0600
    @@ -187,6 +187,9 @@ static __inline__ int __raw_write_can_lo
    return !rw->counter;
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/arch/x86/include/asm/spinlock.h
    ===================================================================
    --- rwlock.orig/arch/x86/include/asm/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/arch/x86/include/asm/spinlock.h 2008-11-04 05:58:48.122991938 -0600
    @@ -357,6 +357,9 @@ static inline void __raw_write_unlock(ra
    : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
    }

    +#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
    +#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
    +
    #define _raw_spin_relax(lock) cpu_relax()
    #define _raw_read_relax(lock) cpu_relax()
    #define _raw_write_relax(lock) cpu_relax()
    Index: rwlock/include/linux/spinlock.h
    ===================================================================
    --- rwlock.orig/include/linux/spinlock.h 2008-11-04 05:54:18.344397031 -0600
    +++ rwlock/include/linux/spinlock.h 2008-11-04 05:58:48.142994501 -0600
    @@ -148,9 +148,11 @@ do { \
    extern int _raw_spin_trylock(spinlock_t *lock);
    extern void _raw_spin_unlock(spinlock_t *lock);
    extern void _raw_read_lock(rwlock_t *lock);
    +#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
    extern int _raw_read_trylock(rwlock_t *lock);
    extern void _raw_read_unlock(rwlock_t *lock);
    extern void _raw_write_lock(rwlock_t *lock);
    +#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
    extern int _raw_write_trylock(rwlock_t *lock);
    extern void _raw_write_unlock(rwlock_t *lock);
    #else
    @@ -160,9 +162,13 @@ do { \
    # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
    # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
    # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
    +# define _raw_read_lock_flags(lock, flags) \
    + __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
    # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
    # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
    # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
    +# define _raw_write_lock_flags(lock, flags) \
    + __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
    # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
    # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
    #endif
    Index: rwlock/kernel/spinlock.c
    ===================================================================
    --- rwlock.orig/kernel/spinlock.c 2008-11-04 05:55:43.347304150 -0600
    +++ rwlock/kernel/spinlock.c 2008-11-04 05:58:48.166997576 -0600
    @@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqs
    local_irq_save(flags);
    preempt_disable();
    rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
    - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
    + LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
    + _raw_read_lock_flags, &flags);
    return flags;
    }
    EXPORT_SYMBOL(_read_lock_irqsave);
    @@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irq
    local_irq_save(flags);
    preempt_disable();
    rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
    - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
    + LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
    + _raw_write_lock_flags, &flags);
    return flags;
    }
    EXPORT_SYMBOL(_write_lock_irqsave);

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

+ Reply to Thread