Thread: [git pull] generic bitops

  1. [git pull] generic bitops


    Linus, please pull the generic bitops tree from:

    git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-generic-bitops.git for-linus

    this started out as improvements/generalizations to x86 bitops, but grew
    generic impact (and generic optimizations) as well, so it's offered as a
    separate tree.

    Thanks,

    Ingo

    ------------------>
    Alexander van Heukelum (12):
    x86: change x86 to use generic find_next_bit
    x86, uml: fix uml with generic find_next_bit for x86
    x86, generic: optimize find_next_(zero_)bit for small constant-size bitmaps
    x86: merge the simple bitops and move them to bitops.h
    generic: introduce a generic __fls implementation
    generic: implement __fls on all 64-bit archs
    bitops: use __fls for fls64 on 64-bit archs
    x86: generic versions of find_first_(zero_)bit, convert i386
    x86: switch 64-bit to generic find_first_bit
    x86: optimize find_first_bit for small bitmaps
    x86, UML: remove x86-specific implementations of find_first_bit
    x86: finalize bitops unification

    Joe Perches (1):
    x86: include/asm-x86/pgalloc.h/bitops.h: checkpatch cleanups - formatting only

    arch/um/Kconfig.i386 | 8 ++
    arch/um/Kconfig.x86_64 | 8 ++
    arch/um/sys-i386/Makefile | 2 +-
    arch/um/sys-x86_64/Makefile | 2 +-
    arch/x86/Kconfig | 6 ++
    arch/x86/Kconfig.cpu | 2 +-
    arch/x86/lib/Makefile | 3 +-
    arch/x86/lib/bitops_32.c | 70 --------------
    arch/x86/lib/bitops_64.c | 175 ------------------------------------
    include/asm-alpha/bitops.h | 5 +
    include/asm-generic/bitops/__fls.h | 43 +++++++++
    include/asm-generic/bitops/find.h | 2 +
    include/asm-generic/bitops/fls64.h | 22 +++++
    include/asm-ia64/bitops.h | 16 ++++
    include/asm-mips/bitops.h | 5 +
    include/asm-parisc/bitops.h | 1 +
    include/asm-powerpc/bitops.h | 5 +
    include/asm-s390/bitops.h | 1 +
    include/asm-sh/bitops.h | 1 +
    include/asm-sparc64/bitops.h | 1 +
    include/asm-x86/bitops.h | 149 ++++++++++++++++++++++++++++---
    include/asm-x86/bitops_32.h | 166 ----------------------------------
    include/asm-x86/bitops_64.h | 162 ---------------------------------
    include/linux/bitops.h | 140 ++++++++++++++++++++++++++++
    lib/Makefile | 1 +
    lib/find_next_bit.c | 77 +++++++++++++---
    26 files changed, 470 insertions(+), 603 deletions(-)
    delete mode 100644 arch/x86/lib/bitops_32.c
    delete mode 100644 arch/x86/lib/bitops_64.c
    create mode 100644 include/asm-generic/bitops/__fls.h
    delete mode 100644 include/asm-x86/bitops_32.h
    delete mode 100644 include/asm-x86/bitops_64.h

    diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
    index e09edfa..49990ea 100644
    --- a/arch/um/Kconfig.i386
    +++ b/arch/um/Kconfig.i386
    @@ -39,6 +39,14 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
    bool
    default y

    +config GENERIC_FIND_FIRST_BIT
    + bool
    + default y
    +
    +config GENERIC_FIND_NEXT_BIT
    + bool
    + default y
    +
    config GENERIC_HWEIGHT
    bool
    default y
    diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    index 3fbe69e..7a75043 100644
    --- a/arch/um/Kconfig.x86_64
    +++ b/arch/um/Kconfig.x86_64
    @@ -27,6 +27,14 @@ config SMP_BROKEN
    bool
    default y

    +config GENERIC_FIND_FIRST_BIT
    + bool
    + default y
    +
    +config GENERIC_FIND_NEXT_BIT
    + bool
    + default y
    +
    config GENERIC_HWEIGHT
    bool
    default y
    diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
    index 964dc1a..598b5c1 100644
    --- a/arch/um/sys-i386/Makefile
    +++ b/arch/um/sys-i386/Makefile
    @@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
    ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
    sys_call_table.o tls.o

    -subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o
    +subarch-obj-y = lib/semaphore_32.o lib/string_32.o
    subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
    subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o

    diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
    index 3c22de5..c8b4cce 100644
    --- a/arch/um/sys-x86_64/Makefile
    +++ b/arch/um/sys-x86_64/Makefile
    @@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \

    obj-$(CONFIG_MODULES) += um_module.o

    -subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
    +subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
    subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o

    ldt-y = ../sys-i386/ldt.o
    diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
    index 87a693c..d1a5a44 100644
    --- a/arch/x86/Kconfig
    +++ b/arch/x86/Kconfig
    @@ -77,6 +77,12 @@ config GENERIC_BUG
    def_bool y
    depends on BUG

    +config GENERIC_FIND_FIRST_BIT
    + def_bool y
    +
    +config GENERIC_FIND_NEXT_BIT
    + def_bool y
    +
    config GENERIC_HWEIGHT
    def_bool y

    diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
    index 57072f2..b9368f5 100644
    --- a/arch/x86/Kconfig.cpu
    +++ b/arch/x86/Kconfig.cpu
    @@ -398,7 +398,7 @@ config X86_TSC
    # generates cmov.
    config X86_CMOV
    def_bool y
    - depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
    + depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64)

    config X86_MINIMUM_CPU_FAMILY
    int
    diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
    index 25df1c1..76f60f5 100644
    --- a/arch/x86/lib/Makefile
    +++ b/arch/x86/lib/Makefile
    @@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o
    ifeq ($(CONFIG_X86_32),y)
    lib-y += checksum_32.o
    lib-y += strstr_32.o
    - lib-y += bitops_32.o semaphore_32.o string_32.o
    + lib-y += semaphore_32.o string_32.o

    lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
    else
    @@ -21,7 +21,6 @@ else

    lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
    lib-y += thunk_64.o clear_page_64.o copy_page_64.o
    - lib-y += bitops_64.o
    lib-y += memmove_64.o memset_64.o
    lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
    endif
    diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c
    deleted file mode 100644
    index b654404..0000000
    --- a/arch/x86/lib/bitops_32.c
    +++ /dev/null
    @@ -1,70 +0,0 @@
    -#include
    -#include
    -
    -/**
    - * find_next_bit - find the next set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_bit(const unsigned long *addr, int size, int offset)
    -{
    - const unsigned long *p = addr + (offset >> 5);
    - int set = 0, bit = offset & 31, res;
    -
    - if (bit) {
    - /*
    - * Look for nonzero in the first 32 bits:
    - */
    - __asm__("bsfl %1,%0\n\t"
    - "jne 1f\n\t"
    - "movl $32, %0\n"
    - "1:"
    - : "=r" (set)
    - : "r" (*p >> bit));
    - if (set < (32 - bit))
    - return set + offset;
    - set = 32 - bit;
    - p++;
    - }
    - /*
    - * No set bit yet, search remaining full words for a bit
    - */
    - res = find_first_bit (p, size - 32 * (p - addr));
    - return (offset + set + res);
    -}
    -EXPORT_SYMBOL(find_next_bit);
    -
    -/**
    - * find_next_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_zero_bit(const unsigned long *addr, int size, int offset)
    -{
    - const unsigned long *p = addr + (offset >> 5);
    - int set = 0, bit = offset & 31, res;
    -
    - if (bit) {
    - /*
    - * Look for zero in the first 32 bits.
    - */
    - __asm__("bsfl %1,%0\n\t"
    - "jne 1f\n\t"
    - "movl $32, %0\n"
    - "1:"
    - : "=r" (set)
    - : "r" (~(*p >> bit)));
    - if (set < (32 - bit))
    - return set + offset;
    - set = 32 - bit;
    - p++;
    - }
    - /*
    - * No zero yet, search remaining full bytes for a zero
    - */
    - res = find_first_zero_bit(p, size - 32 * (p - addr));
    - return (offset + set + res);
    -}
    -EXPORT_SYMBOL(find_next_zero_bit);
    diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c
    deleted file mode 100644
    index 0e8f491..0000000
    --- a/arch/x86/lib/bitops_64.c
    +++ /dev/null
    @@ -1,175 +0,0 @@
    -#include
    -
    -#undef find_first_zero_bit
    -#undef find_next_zero_bit
    -#undef find_first_bit
    -#undef find_next_bit
    -
    -static inline long
    -__find_first_zero_bit(const unsigned long * addr, unsigned long size)
    -{
    - long d0, d1, d2;
    - long res;
    -
    - /*
    - * We must test the size in words, not in bits, because
    - * otherwise incoming sizes in the range -63..-1 will not run
    - * any scasq instructions, and then the flags used by the je
    - * instruction will have whatever random value was in place
    - * before. Nobody should call us like that, but
    - * find_next_zero_bit() does when offset and size are at the
    - * same word and it fails to find a zero itself.
    - */
    - size += 63;
    - size >>= 6;
    - if (!size)
    - return 0;
    - asm volatile(
    - " repe; scasq\n"
    - " je 1f\n"
    - " xorq -8(%%rdi),%%rax\n"
    - " subq $8,%%rdi\n"
    - " bsfq %%rax,%%rdx\n"
    - "1: subq %[addr],%%rdi\n"
    - " shlq $3,%%rdi\n"
    - " addq %%rdi,%%rdx"
    - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
    - :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
    - [addr] "S" (addr) : "memory");
    - /*
    - * Any register would do for [addr] above, but GCC tends to
    - * prefer rbx over rsi, even though rsi is readily available
    - * and doesn't have to be saved.
    - */
    - return res;
    -}
    -
    -/**
    - * find_first_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit-number of the first zero bit, not the number of the byte
    - * containing a bit.
    - */
    -long find_first_zero_bit(const unsigned long * addr, unsigned long size)
    -{
    - return __find_first_zero_bit (addr, size);
    -}
    -
    -/**
    - * find_next_zero_bit - find the next zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -long find_next_zero_bit (const unsigned long * addr, long size, long offset)
    -{
    - const unsigned long * p = addr + (offset >> 6);
    - unsigned long set = 0;
    - unsigned long res, bit = offset&63;
    -
    - if (bit) {
    - /*
    - * Look for zero in first word
    - */
    - asm("bsfq %1,%0\n\t"
    - "cmoveq %2,%0"
    - : "=r" (set)
    - : "r" (~(*p >> bit)), "r"(64L));
    - if (set < (64 - bit))
    - return set + offset;
    - set = 64 - bit;
    - p++;
    - }
    - /*
    - * No zero yet, search remaining full words for a zero
    - */
    - res = __find_first_zero_bit (p, size - 64 * (p - addr));
    -
    - return (offset + set + res);
    -}
    -
    -static inline long
    -__find_first_bit(const unsigned long * addr, unsigned long size)
    -{
    - long d0, d1;
    - long res;
    -
    - /*
    - * We must test the size in words, not in bits, because
    - * otherwise incoming sizes in the range -63..-1 will not run
    - * any scasq instructions, and then the flags used by the jz
    - * instruction will have whatever random value was in place
    - * before. Nobody should call us like that, but
    - * find_next_bit() does when offset and size are at the same
    - * word and it fails to find a one itself.
    - */
    - size += 63;
    - size >>= 6;
    - if (!size)
    - return 0;
    - asm volatile(
    - " repe; scasq\n"
    - " jz 1f\n"
    - " subq $8,%%rdi\n"
    - " bsfq (%%rdi),%%rax\n"
    - "1: subq %[addr],%%rdi\n"
    - " shlq $3,%%rdi\n"
    - " addq %%rdi,%%rax"
    - :"=a" (res), "=&c" (d0), "=&D" (d1)
    - :"0" (0ULL), "1" (size), "2" (addr),
    - [addr] "r" (addr) : "memory");
    - return res;
    -}
    -
    -/**
    - * find_first_bit - find the first set bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit-number of the first set bit, not the number of the byte
    - * containing a bit.
    - */
    -long find_first_bit(const unsigned long * addr, unsigned long size)
    -{
    - return __find_first_bit(addr,size);
    -}
    -
    -/**
    - * find_next_bit - find the first set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -long find_next_bit(const unsigned long * addr, long size, long offset)
    -{
    - const unsigned long * p = addr + (offset >> 6);
    - unsigned long set = 0, bit = offset & 63, res;
    -
    - if (bit) {
    - /*
    - * Look for nonzero in the first 64 bits:
    - */
    - asm("bsfq %1,%0\n\t"
    - "cmoveq %2,%0\n\t"
    - : "=r" (set)
    - : "r" (*p >> bit), "r" (64L));
    - if (set < (64 - bit))
    - return set + offset;
    - set = 64 - bit;
    - p++;
    - }
    - /*
    - * No set bit yet, search remaining full words for a bit
    - */
    - res = __find_first_bit (p, size - 64 * (p - addr));
    - return (offset + set + res);
    -}
    -
    -#include
    -
    -EXPORT_SYMBOL(find_next_bit);
    -EXPORT_SYMBOL(find_first_bit);
    -EXPORT_SYMBOL(find_first_zero_bit);
    -EXPORT_SYMBOL(find_next_zero_bit);
    diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
    index 9e19a70..15f3ae2 100644
    --- a/include/asm-alpha/bitops.h
    +++ b/include/asm-alpha/bitops.h
    @@ -388,6 +388,11 @@ static inline int fls64(unsigned long x)
    }
    #endif

    +static inline unsigned long __fls(unsigned long x)
    +{
    + return fls64(x) - 1;
    +}
    +
    static inline int fls(int x)
    {
    return fls64((unsigned int) x);
    diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
    new file mode 100644
    index 0000000..be24465
    --- /dev/null
    +++ b/include/asm-generic/bitops/__fls.h
    @@ -0,0 +1,43 @@
    +#ifndef _ASM_GENERIC_BITOPS___FLS_H_
    +#define _ASM_GENERIC_BITOPS___FLS_H_
    +
    +#include
    +
    +/**
    + * __fls - find last (most-significant) set bit in a long word
    + * @word: the word to search
    + *
    + * Undefined if no set bit exists, so code should check against 0 first.
    + */
    +static inline unsigned long __fls(unsigned long word)
    +{
    + int num = BITS_PER_LONG - 1;
    +
    +#if BITS_PER_LONG == 64
    + if (!(word & (~0ul << 32))) {
    + num -= 32;
    + word <<= 32;
    + }
    +#endif
    + if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
    + num -= 16;
    + word <<= 16;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
    + num -= 8;
    + word <<= 8;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
    + num -= 4;
    + word <<= 4;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
    + num -= 2;
    + word <<= 2;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-1))))
    + num -= 1;
    + return num;
    +}
    +
    +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
    diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
    index 72a51e5..1914e97 100644
    --- a/include/asm-generic/bitops/find.h
    +++ b/include/asm-generic/bitops/find.h
    @@ -1,11 +1,13 @@
    #ifndef _ASM_GENERIC_BITOPS_FIND_H_
    #define _ASM_GENERIC_BITOPS_FIND_H_

    +#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
    extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
    size, unsigned long offset);

    extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
    long size, unsigned long offset);
    +#endif

    #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
    #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
    diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
    index 1b6b17c..86d403f 100644
    --- a/include/asm-generic/bitops/fls64.h
    +++ b/include/asm-generic/bitops/fls64.h
    @@ -3,6 +3,18 @@

    #include

    +/**
    + * fls64 - find last set bit in a 64-bit word
    + * @x: the word to search
    + *
    + * This is defined in a similar way as the libc and compiler builtin
    + * ffsll, but returns the position of the most significant set bit.
    + *
    + * fls64(value) returns 0 if value is 0 or the position of the last
    + * set bit if value is nonzero. The last (most significant) bit is
    + * at position 64.
    + */
    +#if BITS_PER_LONG == 32
    static inline int fls64(__u64 x)
    {
    __u32 h = x >> 32;
    @@ -10,5 +22,15 @@ static inline int fls64(__u64 x)
    return fls(h) + 32;
    return fls(x);
    }
    +#elif BITS_PER_LONG == 64
    +static inline int fls64(__u64 x)
    +{
    + if (x == 0)
    + return 0;
    + return __fls(x) + 1;
    +}
    +#else
    +#error BITS_PER_LONG not 32 or 64
    +#endif

    #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
    diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
    index 953d3df..e2ca800 100644
    --- a/include/asm-ia64/bitops.h
    +++ b/include/asm-ia64/bitops.h
    @@ -407,6 +407,22 @@ fls (int t)
    return ia64_popcnt(x);
    }

    +/*
    + * Find the last (most significant) bit set. Undefined for x==0.
    + * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
    + */
    +static inline unsigned long
    +__fls (unsigned long x)
    +{
    + x |= x >> 1;
    + x |= x >> 2;
    + x |= x >> 4;
    + x |= x >> 8;
    + x |= x >> 16;
    + x |= x >> 32;
    + return ia64_popcnt(x) - 1;
    +}
    +
    #include

    /*
    diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
    index ec75ce4..c2bd126 100644
    --- a/include/asm-mips/bitops.h
    +++ b/include/asm-mips/bitops.h
    @@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x)
    return 63 - lz;
    }

    +static inline unsigned long __fls(unsigned long x)
    +{
    + return __ilog2(x);
    +}
    +
    #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

    /*
    diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
    index f8eebcb..7a6ea10 100644
    --- a/include/asm-parisc/bitops.h
    +++ b/include/asm-parisc/bitops.h
    @@ -210,6 +210,7 @@ static __inline__ int fls(int x)
    return ret;
    }

    +#include
    #include
    #include
    #include
    diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
    index a99a749..897eade 100644
    --- a/include/asm-powerpc/bitops.h
    +++ b/include/asm-powerpc/bitops.h
    @@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
    return 32 - lz;
    }

    +static __inline__ unsigned long __fls(unsigned long x)
    +{
    + return __ilog2(x);
    +}
    +
    /*
    * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
    * instruction; for 32-bit we use the generic version, which does two
    diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
    index 965394e..b4eb24a 100644
    --- a/include/asm-s390/bitops.h
    +++ b/include/asm-s390/bitops.h
    @@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
    }

    #include
    +#include
    #include

    #include
    diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
    index b6ba5a6..d7d382f 100644
    --- a/include/asm-sh/bitops.h
    +++ b/include/asm-sh/bitops.h
    @@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word)
    #include
    #include
    #include
    +#include
    #include

    #endif /* __KERNEL__ */
    diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
    index 982ce89..11f9d81 100644
    --- a/include/asm-sparc64/bitops.h
    +++ b/include/asm-sparc64/bitops.h
    @@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
    #include
    #include
    #include
    +#include
    #include

    #ifdef __KERNEL__
    diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
    index 1ae7b27..b81a4d4 100644
    --- a/include/asm-x86/bitops.h
    +++ b/include/asm-x86/bitops.h
    @@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
    */
    static inline void __set_bit(int nr, volatile void *addr)
    {
    - asm volatile("bts %1,%0"
    - : ADDR
    - : "Ir" (nr) : "memory");
    + asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
    }

    -
    /**
    * clear_bit - Clears a bit in memory
    * @nr: Bit to clear
    @@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
    static int test_bit(int nr, const volatile unsigned long *addr);
    #endif

    -#define test_bit(nr,addr) \
    - (__builtin_constant_p(nr) ? \
    - constant_test_bit((nr),(addr)) : \
    - variable_test_bit((nr),(addr)))
    +#define test_bit(nr, addr) \
    + (__builtin_constant_p((nr)) \
    + ? constant_test_bit((nr), (addr)) \
    + : variable_test_bit((nr), (addr)))
    +
    +/**
    + * __ffs - find first set bit in word
    + * @word: The word to search
    + *
    + * Undefined if no bit exists, so code should check against 0 first.
    + */
    +static inline unsigned long __ffs(unsigned long word)
    +{
    + asm("bsf %1,%0"
    + : "=r" (word)
    + : "rm" (word));
    + return word;
    +}
    +
    +/**
    + * ffz - find first zero bit in word
    + * @word: The word to search
    + *
    + * Undefined if no zero exists, so code should check against ~0UL first.
    + */
    +static inline unsigned long ffz(unsigned long word)
    +{
    + asm("bsf %1,%0"
    + : "=r" (word)
    + : "r" (~word));
    + return word;
    +}
    +
    +/*
    + * __fls: find last set bit in word
    + * @word: The word to search
    + *
    + * Undefined if no zero exists, so code should check against ~0UL first.
    + */
    +static inline unsigned long __fls(unsigned long word)
    +{
    + asm("bsr %1,%0"
    + : "=r" (word)
    + : "rm" (word));
    + return word;
    +}
    +
    +#ifdef __KERNEL__
    +/**
    + * ffs - find first set bit in word
    + * @x: the word to search
    + *
    + * This is defined the same way as the libc and compiler builtin ffs
    + * routines, therefore differs in spirit from the other bitops.
    + *
    + * ffs(value) returns 0 if value is 0 or the position of the first
    + * set bit if value is nonzero. The first (least significant) bit
    + * is at position 1.
    + */
    +static inline int ffs(int x)
    +{
    + int r;
    +#ifdef CONFIG_X86_CMOV
    + asm("bsfl %1,%0\n\t"
    + "cmovzl %2,%0"
    + : "=r" (r) : "rm" (x), "r" (-1));
    +#else
    + asm("bsfl %1,%0\n\t"
    + "jnz 1f\n\t"
    + "movl $-1,%0\n"
    + "1:" : "=r" (r) : "rm" (x));
    +#endif
    + return r + 1;
    +}
    +
    +/**
    + * fls - find last set bit in word
    + * @x: the word to search
    + *
    + * This is defined in a similar way as the libc and compiler builtin
    + * ffs, but returns the position of the most significant set bit.
    + *
    + * fls(value) returns 0 if value is 0 or the position of the last
    + * set bit if value is nonzero. The last (most significant) bit is
    + * at position 32.
    + */
    +static inline int fls(int x)
    +{
    + int r;
    +#ifdef CONFIG_X86_CMOV
    + asm("bsrl %1,%0\n\t"
    + "cmovzl %2,%0"
    + : "=&r" (r) : "rm" (x), "rm" (-1));
    +#else
    + asm("bsrl %1,%0\n\t"
    + "jnz 1f\n\t"
    + "movl $-1,%0\n"
    + "1:" : "=r" (r) : "rm" (x));
    +#endif
    + return r + 1;
    +}
    +#endif /* __KERNEL__ */

    #undef BASE_ADDR
    #undef BIT_ADDR
    #undef ADDR

    -#ifdef CONFIG_X86_32
    -# include "bitops_32.h"
    -#else
    -# include "bitops_64.h"
    -#endif
    +static inline void set_bit_string(unsigned long *bitmap,
    + unsigned long i, int len)
    +{
    + unsigned long end = i + len;
    + while (i < end) {
    + __set_bit(i, bitmap);
    + i++;
    + }
    +}
    +
    +#ifdef __KERNEL__
    +
    +#include
    +
    +#define ARCH_HAS_FAST_MULTIPLIER 1
    +
    +#include
    +
    +#endif /* __KERNEL__ */
    +
    +#include
    +
    +#ifdef __KERNEL__
    +
    +#include
    +
    +#define ext2_set_bit_atomic(lock, nr, addr) \
    + test_and_set_bit((nr), (unsigned long *)(addr))
    +#define ext2_clear_bit_atomic(lock, nr, addr) \
    + test_and_clear_bit((nr), (unsigned long *)(addr))
    +
    +#include

    +#endif /* __KERNEL__ */
    #endif /* _ASM_X86_BITOPS_H */
    diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
    deleted file mode 100644
    index 2513a81..0000000
    --- a/include/asm-x86/bitops_32.h
    +++ /dev/null
    @@ -1,166 +0,0 @@
    -#ifndef _I386_BITOPS_H
    -#define _I386_BITOPS_H
    -
    -/*
    - * Copyright 1992, Linus Torvalds.
    - */
    -
    -/**
    - * find_first_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit number of the first zero bit, not the number of the byte
    - * containing a bit.
    - */
    -static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
    -{
    - int d0, d1, d2;
    - int res;
    -
    - if (!size)
    - return 0;
    - /* This looks at memory.
    - * Mark it volatile to tell gcc not to move it around
    - */
    - asm volatile("movl $-1,%%eax\n\t"
    - "xorl %%edx,%%edx\n\t"
    - "repe; scasl\n\t"
    - "je 1f\n\t"
    - "xorl -4(%%edi),%%eax\n\t"
    - "subl $4,%%edi\n\t"
    - "bsfl %%eax,%%edx\n"
    - "1:\tsubl %%ebx,%%edi\n\t"
    - "shll $3,%%edi\n\t"
    - "addl %%edi,%%edx"
    - : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
    - : "1" ((size + 31) >> 5), "2" (addr),
    - "b" (addr) : "memory");
    - return res;
    -}
    -
    -/**
    - * find_next_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bit number to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_zero_bit(const unsigned long *addr, int size, int offset);
    -
    -/**
    - * __ffs - find first bit in word.
    - * @word: The word to search
    - *
    - * Undefined if no bit exists, so code should check against 0 first.
    - */
    -static inline unsigned long __ffs(unsigned long word)
    -{
    - __asm__("bsfl %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -/**
    - * find_first_bit - find the first set bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit number of the first set bit, not the number of the byte
    - * containing a bit.
    - */
    -static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
    -{
    - unsigned x = 0;
    -
    - while (x < size) {
    - unsigned long val = *addr++;
    - if (val)
    - return __ffs(val) + x;
    - x += sizeof(*addr) << 3;
    - }
    - return x;
    -}
    -
    -/**
    - * find_next_bit - find the first set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bit number to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_bit(const unsigned long *addr, int size, int offset);
    -
    -/**
    - * ffz - find first zero in word.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long ffz(unsigned long word)
    -{
    - __asm__("bsfl %1,%0"
    - :"=r" (word)
    - :"r" (~word));
    - return word;
    -}
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -/**
    - * ffs - find first bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as
    - * the libc and compiler builtin ffs routines, therefore
    - * differs in spirit from the above ffz() (man ffs).
    - */
    -static inline int ffs(int x)
    -{
    - int r;
    -
    - __asm__("bsfl %1,%0\n\t"
    - "jnz 1f\n\t"
    - "movl $-1,%0\n"
    - "1:" : "=r" (r) : "rm" (x));
    - return r+1;
    -}
    -
    -/**
    - * fls - find last bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as ffs().
    - */
    -static inline int fls(int x)
    -{
    - int r;
    -
    - __asm__("bsrl %1,%0\n\t"
    - "jnz 1f\n\t"
    - "movl $-1,%0\n"
    - "1:" : "=r" (r) : "rm" (x));
    - return r+1;
    -}
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#include
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -#define ext2_set_bit_atomic(lock, nr, addr) \
    - test_and_set_bit((nr), (unsigned long *)(addr))
    -#define ext2_clear_bit_atomic(lock, nr, addr) \
    - test_and_clear_bit((nr), (unsigned long *)(addr))
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#endif /* _I386_BITOPS_H */
    diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
    deleted file mode 100644
    index 365f820..0000000
    --- a/include/asm-x86/bitops_64.h
    +++ /dev/null
    @@ -1,162 +0,0 @@
    -#ifndef _X86_64_BITOPS_H
    -#define _X86_64_BITOPS_H
    -
    -/*
    - * Copyright 1992, Linus Torvalds.
    - */
    -
    -extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
    -extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
    -extern long find_first_bit(const unsigned long *addr, unsigned long size);
    -extern long find_next_bit(const unsigned long *addr, long size, long offset);
    -
    -/* return index of first bet set in val or max when no bit is set */
    -static inline long __scanbit(unsigned long val, unsigned long max)
    -{
    - asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
    - return val;
    -}
    -
    -#define find_next_bit(addr,size,off) \
    -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
    - ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
    - find_next_bit(addr,size,off)))
    -
    -#define find_next_zero_bit(addr,size,off) \
    -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
    - ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
    - find_next_zero_bit(addr,size,off)))
    -
    -#define find_first_bit(addr, size) \
    - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
    - ? (__scanbit(*(unsigned long *)(addr), (size))) \
    - : find_first_bit((addr), (size))))
    -
    -#define find_first_zero_bit(addr, size) \
    - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
    - ? (__scanbit(~*(unsigned long *)(addr), (size))) \
    - : find_first_zero_bit((addr), (size))))
    -
    -static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
    - int len)
    -{
    - unsigned long end = i + len;
    - while (i < end) {
    - __set_bit(i, bitmap);
    - i++;
    - }
    -}
    -
    -/**
    - * ffz - find first zero in word.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long ffz(unsigned long word)
    -{
    - __asm__("bsfq %1,%0"
    - :"=r" (word)
    - :"r" (~word));
    - return word;
    -}
    -
    -/**
    - * __ffs - find first bit in word.
    - * @word: The word to search
    - *
    - * Undefined if no bit exists, so code should check against 0 first.
    - */
    -static inline unsigned long __ffs(unsigned long word)
    -{
    - __asm__("bsfq %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -/*
    - * __fls: find last bit set.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long __fls(unsigned long word)
    -{
    - __asm__("bsrq %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -/**
    - * ffs - find first bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as
    - * the libc and compiler builtin ffs routines, therefore
    - * differs in spirit from the above ffz (man ffs).
    - */
    -static inline int ffs(int x)
    -{
    - int r;
    -
    - __asm__("bsfl %1,%0\n\t"
    - "cmovzl %2,%0"
    - : "=r" (r) : "rm" (x), "r" (-1));
    - return r+1;
    -}
    -
    -/**
    - * fls64 - find last bit set in 64 bit word
    - * @x: the word to search
    - *
    - * This is defined the same way as fls.
    - */
    -static inline int fls64(__u64 x)
    -{
    - if (x == 0)
    - return 0;
    - return __fls(x) + 1;
    -}
    -
    -/**
    - * fls - find last bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as ffs.
    - */
    -static inline int fls(int x)
    -{
    - int r;
    -
    - __asm__("bsrl %1,%0\n\t"
    - "cmovzl %2,%0"
    - : "=&r" (r) : "rm" (x), "rm" (-1));
    - return r+1;
    -}
    -
    -#define ARCH_HAS_FAST_MULTIPLIER 1
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -#define ext2_set_bit_atomic(lock, nr, addr) \
    - test_and_set_bit((nr), (unsigned long *)(addr))
    -#define ext2_clear_bit_atomic(lock, nr, addr) \
    - test_and_clear_bit((nr), (unsigned long *)(addr))
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#endif /* _X86_64_BITOPS_H */
    diff --git a/include/linux/bitops.h b/include/linux/bitops.h
    index 40d5473..48bde60 100644
    --- a/include/linux/bitops.h
    +++ b/include/linux/bitops.h
    @@ -112,4 +112,144 @@ static inline unsigned fls_long(unsigned long l)
    return fls64(l);
    }

    +#ifdef __KERNEL__
    +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
    +extern unsigned long __find_first_bit(const unsigned long *addr,
    + unsigned long size);
    +
    +/**
    + * find_first_bit - find the first set bit in a memory region
    + * @addr: The address to start the search at
    + * @size: The maximum size to search
    + *
    + * Returns the bit number of the first set bit.
    + */
    +static __always_inline unsigned long
    +find_first_bit(const unsigned long *addr, unsigned long size)
    +{
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG))
    + return __ffs((*addr) | (1ul << size));
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
    + return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr);
    +
    + /* size is not constant or too big */
    + return __find_first_bit(addr, size);
    +}
    +
    +extern unsigned long __find_first_zero_bit(const unsigned long *addr,
    + unsigned long size);
    +
    +/**
    + * find_first_zero_bit - find the first cleared bit in a memory region
    + * @addr: The address to start the search at
    + * @size: The maximum size to search
    + *
    + * Returns the bit number of the first cleared bit.
    + */
    +static __always_inline unsigned long
    +find_first_zero_bit(const unsigned long *addr, unsigned long size)
    +{
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + return __ffs(~(*addr) | (1ul << size));
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
    + return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr));
    +
    + /* size is not constant or too big */
    + return __find_first_zero_bit(addr, size);
    +}
    +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
    +
    +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
    +extern unsigned long __find_next_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset);
    +
    +/**
    + * find_next_bit - find the next set bit in a memory region
    + * @addr: The address to base the search on
    + * @offset: The bitnumber to start searching at
    + * @size: The bitmap size in bits
    + */
    +static __always_inline unsigned long
    +find_next_bit(const unsigned long *addr, unsigned long size,
    + unsigned long offset)
    +{
    + unsigned long value;
    +
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + value = (*addr) & ((~0ul) << offset);
    + value |= (1ul << size);
    + return __ffs(value);
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
    + value = (*addr) & ((~0ul) << offset);
    + return (value == 0) ? BITS_PER_LONG : __ffs(value);
    + }
    +
    + /* size is not constant or too big */
    + return __find_next_bit(addr, size, offset);
    +}
    +
    +extern unsigned long __find_next_zero_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset);
    +
    +/**
    + * find_next_zero_bit - find the next cleared bit in a memory region
    + * @addr: The address to base the search on
    + * @offset: The bitnumber to start searching at
    + * @size: The bitmap size in bits
    + */
    +static __always_inline unsigned long
    +find_next_zero_bit(const unsigned long *addr, unsigned long size,
    + unsigned long offset)
    +{
    + unsigned long value;
    +
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + value = (~(*addr)) & ((~0ul) << offset);
    + value |= (1ul << size);
    + return __ffs(value);
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
    + value = (~(*addr)) & ((~0ul) << offset);
    + return (value == 0) ? BITS_PER_LONG : __ffs(value);
    + }
    +
    + /* size is not constant or too big */
    + return __find_next_zero_bit(addr, size, offset);
    +}
    +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
    +#endif /* __KERNEL__ */
    #endif
    diff --git a/lib/Makefile b/lib/Makefile
    index bf8000f..2d7001b 100644
    --- a/lib/Makefile
    +++ b/lib/Makefile
    @@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
    obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
    lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
    lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
    +lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
    lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
    obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
    obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
    diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
    index 78ccd73..d3f5784 100644
    --- a/lib/find_next_bit.c
    +++ b/lib/find_next_bit.c
    @@ -16,14 +16,12 @@

    #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

    -/**
    - * find_next_bit - find the next set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
    +/*
    + * Find the next set bit in a memory region.
    */
    -unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
    - unsigned long offset)
    +unsigned long __find_next_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset)
    {
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    @@ -60,15 +58,14 @@ found_first:
    found_middle:
    return result + __ffs(tmp);
    }
    -
    -EXPORT_SYMBOL(find_next_bit);
    +EXPORT_SYMBOL(__find_next_bit);

    /*
    * This implementation of find_{first,next}_zero_bit was stolen from
    * Linus' asm-alpha/bitops.h.
    */
    -unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
    - unsigned long offset)
    +unsigned long __find_next_zero_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset)
    {
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    @@ -105,8 +102,64 @@ found_first:
    found_middle:
    return result + ffz(tmp);
    }
    +EXPORT_SYMBOL(__find_next_zero_bit);
    +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
    +
    +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
    +/*
    + * Find the first set bit in a memory region.
    + */
    +unsigned long __find_first_bit(const unsigned long *addr,
    + unsigned long size)
    +{
    + const unsigned long *p = addr;
    + unsigned long result = 0;
    + unsigned long tmp;

    -EXPORT_SYMBOL(find_next_zero_bit);
    + while (size & ~(BITS_PER_LONG-1)) {
    + if ((tmp = *(p++)))
    + goto found;
    + result += BITS_PER_LONG;
    + size -= BITS_PER_LONG;
    + }
    + if (!size)
    + return result;
    +
    + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
    + if (tmp == 0UL) /* Are any bits set? */
    + return result + size; /* Nope. */
    +found:
    + return result + __ffs(tmp);
    +}
    +EXPORT_SYMBOL(__find_first_bit);
    +
    +/*
    + * Find the first cleared bit in a memory region.
    + */
    +unsigned long __find_first_zero_bit(const unsigned long *addr,
    + unsigned long size)
    +{
    + const unsigned long *p = addr;
    + unsigned long result = 0;
    + unsigned long tmp;
    +
    + while (size & ~(BITS_PER_LONG-1)) {
    + if (~(tmp = *(p++)))
    + goto found;
    + result += BITS_PER_LONG;
    + size -= BITS_PER_LONG;
    + }
    + if (!size)
    + return result;
    +
    + tmp = (*p) | (~0UL << size);
    + if (tmp == ~0UL) /* Are any bits zero? */
    + return result + size; /* Nope. */
    +found:
    + return result + ffz(tmp);
    +}
    +EXPORT_SYMBOL(__find_first_zero_bit);
    +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

    #ifdef __BIG_ENDIAN


  2. Re: [git pull] generic bitops



    On Thu, 24 Apr 2008, Ingo Molnar wrote:
    >
    > this started out as improvements/generalizations to x86 bitops, but grew
    > generic impact (and generic optimizations) as well, so it's offered as a
    > separate tree.


    Can you do the config thing differently?

    > diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
    > index e09edfa..49990ea 100644
    > --- a/arch/um/Kconfig.i386
    > +++ b/arch/um/Kconfig.i386
    > @@ -39,6 +39,14 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
    > bool
    > default y
    >
    > +config GENERIC_FIND_FIRST_BIT
    > + bool
    > + default y
    > +
    > +config GENERIC_FIND_NEXT_BIT
    > + bool
    > + default y
    > +


    ... because instead of having each architecture do these kinds of things,
    we're trying to make the *generic* code (ie lib/Kconfig) do

    config GENERIC_FIND_FIRST_BIT
    bool
    default n

    so that you only have one single declaration (close to where the code
    actually exists), and then the architectures would just do

    select GENERIC_FIND_FIRST_BIT
    select GENERIC_FIND_NEXT_BIT

    in their architecture settings. That's how we're now doing things like
    HAVE_IDE, HAVE_OPROFILE, etc etc.
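
    A minimal sketch of that layout (arch/foo and FOO are just placeholders
    here, not real symbols):

    # lib/Kconfig: the single declaration, off by default
    config GENERIC_FIND_FIRST_BIT
        def_bool n

    config GENERIC_FIND_NEXT_BIT
        def_bool n

    # arch/foo/Kconfig: the architecture just opts in
    config FOO
        def_bool y
        select GENERIC_FIND_FIRST_BIT
        select GENERIC_FIND_NEXT_BIT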

    Hmm?

    Linus

  3. Re: [git pull] generic bitops

    On Thu, 2008-04-24 at 15:14 -0700, Linus Torvalds wrote:
    >
    > On Thu, 24 Apr 2008, Ingo Molnar wrote:
    > >
    > > this started out as improvements/generalizations to x86 bitops, but grew
    > > generic impact (and generic optimizations) as well, so it's offered as a
    > > separate tree.

    >
    > Can you do the config thing differently?
    >
    > > diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
    > > index e09edfa..49990ea 100644
    > > --- a/arch/um/Kconfig.i386
    > > +++ b/arch/um/Kconfig.i386
    > > @@ -39,6 +39,14 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
    > > bool
    > > default y
    > >
    > > +config GENERIC_FIND_FIRST_BIT
    > > + bool
    > > + default y
    > > +
    > > +config GENERIC_FIND_NEXT_BIT
    > > + bool
    > > + default y
    > > +

    >
    > ... because instead of having each architecture do these kinds of things,
    > we're trying to make the *generic* code (ie lib/Kconfig) do
    >
    > config GENERIC_FIND_FIRST_BIT
    > bool
    > default n


    config HAVE_GENERIC_FIND_FIRST_BIT
    def_bool n

    (I know it defaults to n, but this pattern is becoming suggestive of
    features that must be selected, in addition to the HAVE_ prefix)

    Although I think for lib stuff, we may want to think about WANT_
    prefixes to suggest you have to opt in.

    Harvey


  4. [PATCH] x86, bitops: select the generic bitmap search functions

    Introduce GENERIC_FIND_FIRST_BIT and GENERIC_FIND_NEXT_BIT in
    lib/Kconfig, defaulting to off. An arch that wants to use the
    generic implementation now only has to use a select statement
    to include them.

    I added an always-y option (X86_CPU) to arch/x86/Kconfig.cpu
    and used that to select the generic search functions. This
    way ARCH=um SUBARCH=i386 automatically picks up the change
    too, and arch/um/Kconfig.i386 can therefore be simplified a
    bit. ARCH=um SUBARCH=x86_64 does things differently, but
    still compiles fine. It seems that a "def_bool y" always
    wins over a "def_bool n"?

    Signed-off-by: Alexander van Heukelum

    ---

    arch/um/Kconfig.i386 | 8 --------
    arch/x86/Kconfig | 6 ------
    arch/x86/Kconfig.cpu | 5 +++++
    lib/Kconfig | 6 ++++++
    4 files changed, 11 insertions(+), 14 deletions(-)

    Hello Linus,

    I think this is close to what you had in mind?

    The patch applies on top of:

    git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-generic-bitops.git for-linus

    Compile tested um/i386, um/x86_64. Boot-tested using
    qemu for i386 and x86_64.

    Greetings,
    Alexander

    diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
    index 49990ea..e09edfa 100644
    --- a/arch/um/Kconfig.i386
    +++ b/arch/um/Kconfig.i386
    @@ -39,14 +39,6 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
    bool
    default y

    -config GENERIC_FIND_FIRST_BIT
    - bool
    - default y
    -
    -config GENERIC_FIND_NEXT_BIT
    - bool
    - default y
    -
    config GENERIC_HWEIGHT
    bool
    default y
    diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
    index 004aad8..4d350b5 100644
    --- a/arch/x86/Kconfig
    +++ b/arch/x86/Kconfig
    @@ -77,12 +77,6 @@ config GENERIC_BUG
    def_bool y
    depends on BUG

    -config GENERIC_FIND_FIRST_BIT
    - def_bool y
    -
    -config GENERIC_FIND_NEXT_BIT
    - def_bool y
    -
    config GENERIC_HWEIGHT
    def_bool y

    diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
    index b9368f5..b94b04e 100644
    --- a/arch/x86/Kconfig.cpu
    +++ b/arch/x86/Kconfig.cpu
    @@ -278,6 +278,11 @@ config GENERIC_CPU

    endchoice

    +config X86_CPU
    + def_bool y
    + select GENERIC_FIND_FIRST_BIT
    + select GENERIC_FIND_NEXT_BIT
    +
    config X86_GENERIC
    bool "Generic x86 support"
    depends on X86_32
    diff --git a/lib/Kconfig b/lib/Kconfig
    index 2d53dc0..8cc8e87 100644
    --- a/lib/Kconfig
    +++ b/lib/Kconfig
    @@ -7,6 +7,12 @@ menu "Library routines"
    config BITREVERSE
    tristate

    +config GENERIC_FIND_FIRST_BIT
    + def_bool n
    +
    +config GENERIC_FIND_NEXT_BIT
    + def_bool n
    +
    config CRC_CCITT
    tristate "CRC-CCITT functions"
    help

  5. [git pull] generic bitops, take 2


    * Linus Torvalds wrote:

    > > this started out as improvements/generalizations to x86 bitops, but
    > > grew generic impact (and generic optimizations) as well, so it's
    > > offered as a separate tree.

    >
    > Can you do the config thing differently?


    i've added Alexander's patch that does the cleanup suggested by you, you
    can pull the updated tree from:

    git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-generic-bitops-v2.git for-linus

    Thanks,

    Ingo

    ------------------>
    Alexander van Heukelum (13):
    x86: change x86 to use generic find_next_bit
    x86, uml: fix uml with generic find_next_bit for x86
    x86, generic: optimize find_next_(zero_)bit for small constant-size bitmaps
    x86: merge the simple bitops and move them to bitops.h
    generic: introduce a generic __fls implementation
    generic: implement __fls on all 64-bit archs
    bitops: use __fls for fls64 on 64-bit archs
    x86: generic versions of find_first_(zero_)bit, convert i386
    x86: switch 64-bit to generic find_first_bit
    x86: optimize find_first_bit for small bitmaps
    x86, UML: remove x86-specific implementations of find_first_bit
    x86: finalize bitops unification
    x86, bitops: select the generic bitmap search functions

    Joe Perches (1):
    x86: include/asm-x86/pgalloc.h/bitops.h: checkpatch cleanups - formatting only

    arch/um/Kconfig.x86_64 | 8 ++
    arch/um/sys-i386/Makefile | 2 +-
    arch/um/sys-x86_64/Makefile | 2 +-
    arch/x86/Kconfig.cpu | 7 ++-
    arch/x86/lib/Makefile | 3 +-
    arch/x86/lib/bitops_32.c | 70 --------------
    arch/x86/lib/bitops_64.c | 175 ------------------------------------
    include/asm-alpha/bitops.h | 5 +
    include/asm-generic/bitops/__fls.h | 43 +++++++++
    include/asm-generic/bitops/find.h | 2 +
    include/asm-generic/bitops/fls64.h | 22 +++++
    include/asm-ia64/bitops.h | 16 ++++
    include/asm-mips/bitops.h | 5 +
    include/asm-parisc/bitops.h | 1 +
    include/asm-powerpc/bitops.h | 5 +
    include/asm-s390/bitops.h | 1 +
    include/asm-sh/bitops.h | 1 +
    include/asm-sparc64/bitops.h | 1 +
    include/asm-x86/bitops.h | 149 ++++++++++++++++++++++++++++---
    include/asm-x86/bitops_32.h | 166 ----------------------------------
    include/asm-x86/bitops_64.h | 162 ---------------------------------
    include/linux/bitops.h | 140 ++++++++++++++++++++++++++++
    lib/Kconfig | 6 ++
    lib/Makefile | 1 +
    lib/find_next_bit.c | 77 +++++++++++++---
    25 files changed, 467 insertions(+), 603 deletions(-)
    delete mode 100644 arch/x86/lib/bitops_32.c
    delete mode 100644 arch/x86/lib/bitops_64.c
    create mode 100644 include/asm-generic/bitops/__fls.h
    delete mode 100644 include/asm-x86/bitops_32.h
    delete mode 100644 include/asm-x86/bitops_64.h

    diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    index 3fbe69e..7a75043 100644
    --- a/arch/um/Kconfig.x86_64
    +++ b/arch/um/Kconfig.x86_64
    @@ -27,6 +27,14 @@ config SMP_BROKEN
    bool
    default y

    +config GENERIC_FIND_FIRST_BIT
    + bool
    + default y
    +
    +config GENERIC_FIND_NEXT_BIT
    + bool
    + default y
    +
    config GENERIC_HWEIGHT
    bool
    default y
    diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
    index 964dc1a..598b5c1 100644
    --- a/arch/um/sys-i386/Makefile
    +++ b/arch/um/sys-i386/Makefile
    @@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
    ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
    sys_call_table.o tls.o

    -subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o
    +subarch-obj-y = lib/semaphore_32.o lib/string_32.o
    subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
    subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o

    diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
    index 3c22de5..c8b4cce 100644
    --- a/arch/um/sys-x86_64/Makefile
    +++ b/arch/um/sys-x86_64/Makefile
    @@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \

    obj-$(CONFIG_MODULES) += um_module.o

    -subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
    +subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
    subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o

    ldt-y = ../sys-i386/ldt.o
    diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
    index 57072f2..b94b04e 100644
    --- a/arch/x86/Kconfig.cpu
    +++ b/arch/x86/Kconfig.cpu
    @@ -278,6 +278,11 @@ config GENERIC_CPU

    endchoice

    +config X86_CPU
    + def_bool y
    + select GENERIC_FIND_FIRST_BIT
    + select GENERIC_FIND_NEXT_BIT
    +
    config X86_GENERIC
    bool "Generic x86 support"
    depends on X86_32
    @@ -398,7 +403,7 @@ config X86_TSC
    # generates cmov.
    config X86_CMOV
    def_bool y
    - depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
    + depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64)

    config X86_MINIMUM_CPU_FAMILY
    int
    diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
    index 25df1c1..76f60f5 100644
    --- a/arch/x86/lib/Makefile
    +++ b/arch/x86/lib/Makefile
    @@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o
    ifeq ($(CONFIG_X86_32),y)
    lib-y += checksum_32.o
    lib-y += strstr_32.o
    - lib-y += bitops_32.o semaphore_32.o string_32.o
    + lib-y += semaphore_32.o string_32.o

    lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
    else
    @@ -21,7 +21,6 @@ else

    lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
    lib-y += thunk_64.o clear_page_64.o copy_page_64.o
    - lib-y += bitops_64.o
    lib-y += memmove_64.o memset_64.o
    lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
    endif
    diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c
    deleted file mode 100644
    index b654404..0000000
    --- a/arch/x86/lib/bitops_32.c
    +++ /dev/null
    @@ -1,70 +0,0 @@
    -#include
    -#include
    -
    -/**
    - * find_next_bit - find the next set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_bit(const unsigned long *addr, int size, int offset)
    -{
    - const unsigned long *p = addr + (offset >> 5);
    - int set = 0, bit = offset & 31, res;
    -
    - if (bit) {
    - /*
    - * Look for nonzero in the first 32 bits:
    - */
    - __asm__("bsfl %1,%0\n\t"
    - "jne 1f\n\t"
    - "movl $32, %0\n"
    - "1:"
    - : "=r" (set)
    - : "r" (*p >> bit));
    - if (set < (32 - bit))
    - return set + offset;
    - set = 32 - bit;
    - p++;
    - }
    - /*
    - * No set bit yet, search remaining full words for a bit
    - */
    - res = find_first_bit (p, size - 32 * (p - addr));
    - return (offset + set + res);
    -}
    -EXPORT_SYMBOL(find_next_bit);
    -
    -/**
    - * find_next_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_zero_bit(const unsigned long *addr, int size, int offset)
    -{
    - const unsigned long *p = addr + (offset >> 5);
    - int set = 0, bit = offset & 31, res;
    -
    - if (bit) {
    - /*
    - * Look for zero in the first 32 bits.
    - */
    - __asm__("bsfl %1,%0\n\t"
    - "jne 1f\n\t"
    - "movl $32, %0\n"
    - "1:"
    - : "=r" (set)
    - : "r" (~(*p >> bit)));
    - if (set < (32 - bit))
    - return set + offset;
    - set = 32 - bit;
    - p++;
    - }
    - /*
    - * No zero yet, search remaining full bytes for a zero
    - */
    - res = find_first_zero_bit(p, size - 32 * (p - addr));
    - return (offset + set + res);
    -}
    -EXPORT_SYMBOL(find_next_zero_bit);
    diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c
    deleted file mode 100644
    index 0e8f491..0000000
    --- a/arch/x86/lib/bitops_64.c
    +++ /dev/null
    @@ -1,175 +0,0 @@
    -#include
    -
    -#undef find_first_zero_bit
    -#undef find_next_zero_bit
    -#undef find_first_bit
    -#undef find_next_bit
    -
    -static inline long
    -__find_first_zero_bit(const unsigned long * addr, unsigned long size)
    -{
    - long d0, d1, d2;
    - long res;
    -
    - /*
    - * We must test the size in words, not in bits, because
    - * otherwise incoming sizes in the range -63..-1 will not run
    - * any scasq instructions, and then the flags used by the je
    - * instruction will have whatever random value was in place
    - * before. Nobody should call us like that, but
    - * find_next_zero_bit() does when offset and size are at the
    - * same word and it fails to find a zero itself.
    - */
    - size += 63;
    - size >>= 6;
    - if (!size)
    - return 0;
    - asm volatile(
    - " repe; scasq\n"
    - " je 1f\n"
    - " xorq -8(%%rdi),%%rax\n"
    - " subq $8,%%rdi\n"
    - " bsfq %%rax,%%rdx\n"
    - "1: subq %[addr],%%rdi\n"
    - " shlq $3,%%rdi\n"
    - " addq %%rdi,%%rdx"
    - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
    - :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
    - [addr] "S" (addr) : "memory");
    - /*
    - * Any register would do for [addr] above, but GCC tends to
    - * prefer rbx over rsi, even though rsi is readily available
    - * and doesn't have to be saved.
    - */
    - return res;
    -}
    -
    -/**
    - * find_first_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit-number of the first zero bit, not the number of the byte
    - * containing a bit.
    - */
    -long find_first_zero_bit(const unsigned long * addr, unsigned long size)
    -{
    - return __find_first_zero_bit (addr, size);
    -}
    -
    -/**
    - * find_next_zero_bit - find the next zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -long find_next_zero_bit (const unsigned long * addr, long size, long offset)
    -{
    - const unsigned long * p = addr + (offset >> 6);
    - unsigned long set = 0;
    - unsigned long res, bit = offset&63;
    -
    - if (bit) {
    - /*
    - * Look for zero in first word
    - */
    - asm("bsfq %1,%0\n\t"
    - "cmoveq %2,%0"
    - : "=r" (set)
    - : "r" (~(*p >> bit)), "r"(64L));
    - if (set < (64 - bit))
    - return set + offset;
    - set = 64 - bit;
    - p++;
    - }
    - /*
    - * No zero yet, search remaining full words for a zero
    - */
    - res = __find_first_zero_bit (p, size - 64 * (p - addr));
    -
    - return (offset + set + res);
    -}
    -
    -static inline long
    -__find_first_bit(const unsigned long * addr, unsigned long size)
    -{
    - long d0, d1;
    - long res;
    -
    - /*
    - * We must test the size in words, not in bits, because
    - * otherwise incoming sizes in the range -63..-1 will not run
    - * any scasq instructions, and then the flags used by the jz
    - * instruction will have whatever random value was in place
    - * before. Nobody should call us like that, but
    - * find_next_bit() does when offset and size are at the same
    - * word and it fails to find a one itself.
    - */
    - size += 63;
    - size >>= 6;
    - if (!size)
    - return 0;
    - asm volatile(
    - " repe; scasq\n"
    - " jz 1f\n"
    - " subq $8,%%rdi\n"
    - " bsfq (%%rdi),%%rax\n"
    - "1: subq %[addr],%%rdi\n"
    - " shlq $3,%%rdi\n"
    - " addq %%rdi,%%rax"
    - :"=a" (res), "=&c" (d0), "=&D" (d1)
    - :"0" (0ULL), "1" (size), "2" (addr),
    - [addr] "r" (addr) : "memory");
    - return res;
    -}
    -
    -/**
    - * find_first_bit - find the first set bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit-number of the first set bit, not the number of the byte
    - * containing a bit.
    - */
    -long find_first_bit(const unsigned long * addr, unsigned long size)
    -{
    - return __find_first_bit(addr,size);
    -}
    -
    -/**
    - * find_next_bit - find the first set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -long find_next_bit(const unsigned long * addr, long size, long offset)
    -{
    - const unsigned long * p = addr + (offset >> 6);
    - unsigned long set = 0, bit = offset & 63, res;
    -
    - if (bit) {
    - /*
    - * Look for nonzero in the first 64 bits:
    - */
    - asm("bsfq %1,%0\n\t"
    - "cmoveq %2,%0\n\t"
    - : "=r" (set)
    - : "r" (*p >> bit), "r" (64L));
    - if (set < (64 - bit))
    - return set + offset;
    - set = 64 - bit;
    - p++;
    - }
    - /*
    - * No set bit yet, search remaining full words for a bit
    - */
    - res = __find_first_bit (p, size - 64 * (p - addr));
    - return (offset + set + res);
    -}
    -
    -#include
    -
    -EXPORT_SYMBOL(find_next_bit);
    -EXPORT_SYMBOL(find_first_bit);
    -EXPORT_SYMBOL(find_first_zero_bit);
    -EXPORT_SYMBOL(find_next_zero_bit);
    diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
    index 9e19a70..15f3ae2 100644
    --- a/include/asm-alpha/bitops.h
    +++ b/include/asm-alpha/bitops.h
    @@ -388,6 +388,11 @@ static inline int fls64(unsigned long x)
    }
    #endif

    +static inline unsigned long __fls(unsigned long x)
    +{
    + return fls64(x) - 1;
    +}
    +
    static inline int fls(int x)
    {
    return fls64((unsigned int) x);
    diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
    new file mode 100644
    index 0000000..be24465
    --- /dev/null
    +++ b/include/asm-generic/bitops/__fls.h
    @@ -0,0 +1,43 @@
    +#ifndef _ASM_GENERIC_BITOPS___FLS_H_
    +#define _ASM_GENERIC_BITOPS___FLS_H_
    +
    +#include
    +
    +/**
    + * __fls - find last (most-significant) set bit in a long word
    + * @word: the word to search
    + *
    + * Undefined if no set bit exists, so code should check against 0 first.
    + */
    +static inline unsigned long __fls(unsigned long word)
    +{
    + int num = BITS_PER_LONG - 1;
    +
    +#if BITS_PER_LONG == 64
    + if (!(word & (~0ul << 32))) {
    + num -= 32;
    + word <<= 32;
    + }
    +#endif
    + if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
    + num -= 16;
    + word <<= 16;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
    + num -= 8;
    + word <<= 8;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
    + num -= 4;
    + word <<= 4;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
    + num -= 2;
    + word <<= 2;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-1))))
    + num -= 1;
    + return num;
    +}
    +
    +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
    diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
    index 72a51e5..1914e97 100644
    --- a/include/asm-generic/bitops/find.h
    +++ b/include/asm-generic/bitops/find.h
    @@ -1,11 +1,13 @@
    #ifndef _ASM_GENERIC_BITOPS_FIND_H_
    #define _ASM_GENERIC_BITOPS_FIND_H_

    +#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
    extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
    size, unsigned long offset);

    extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
    long size, unsigned long offset);
    +#endif

    #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
    #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
    diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
    index 1b6b17c..86d403f 100644
    --- a/include/asm-generic/bitops/fls64.h
    +++ b/include/asm-generic/bitops/fls64.h
    @@ -3,6 +3,18 @@

    #include

    +/**
    + * fls64 - find last set bit in a 64-bit word
    + * @x: the word to search
    + *
    + * This is defined in a similar way as the libc and compiler builtin
    + * ffsll, but returns the position of the most significant set bit.
    + *
    + * fls64(value) returns 0 if value is 0 or the position of the last
    + * set bit if value is nonzero. The last (most significant) bit is
    + * at position 64.
    + */
    +#if BITS_PER_LONG == 32
    static inline int fls64(__u64 x)
    {
    __u32 h = x >> 32;
    @@ -10,5 +22,15 @@ static inline int fls64(__u64 x)
    return fls(h) + 32;
    return fls(x);
    }
    +#elif BITS_PER_LONG == 64
    +static inline int fls64(__u64 x)
    +{
    + if (x == 0)
    + return 0;
    + return __fls(x) + 1;
    +}
    +#else
    +#error BITS_PER_LONG not 32 or 64
    +#endif

    #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
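
    As an aside on the convention documented above: fls64() is 1-based and
    reserves 0 for an all-zero argument, while __fls() is 0-based and
    undefined for 0. A minimal standalone C sketch (ref_fls64 is a
    hypothetical reference helper written only for illustration, not code
    from this patch) pins down the documented return values:

        #include <assert.h>
        #include <stdint.h>

        static int ref_fls64(uint64_t x)
        {
                int pos = 0;

                while (x) {             /* shift until the word is exhausted */
                        x >>= 1;
                        pos++;
                }
                return pos;             /* 0 when x was 0 */
        }

        int main(void)
        {
                assert(ref_fls64(0) == 0);
                assert(ref_fls64(1) == 1);                      /* bit 0 -> position 1 */
                assert(ref_fls64(0x8000000000000000ULL) == 64); /* top bit -> position 64 */
                return 0;
        }

    On 64-bit the patch computes the same result as __fls(x) + 1 after an
    explicit zero check, instead of splitting the word in two halves as
    the 32-bit fallback does.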
    diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
    index 953d3df..e2ca800 100644
    --- a/include/asm-ia64/bitops.h
    +++ b/include/asm-ia64/bitops.h
    @@ -407,6 +407,22 @@ fls (int t)
    return ia64_popcnt(x);
    }

    +/*
    + * Find the last (most significant) bit set. Undefined for x==0.
    + * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
    + */
    +static inline unsigned long
    +__fls (unsigned long x)
    +{
    + x |= x >> 1;
    + x |= x >> 2;
    + x |= x >> 4;
    + x |= x >> 8;
    + x |= x >> 16;
    + x |= x >> 32;
    + return ia64_popcnt(x) - 1;
    +}
    +
    #include

    /*
    diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
    index ec75ce4..c2bd126 100644
    --- a/include/asm-mips/bitops.h
    +++ b/include/asm-mips/bitops.h
    @@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x)
    return 63 - lz;
    }

    +static inline unsigned long __fls(unsigned long x)
    +{
    + return __ilog2(x);
    +}
    +
    #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

    /*
    diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
    index f8eebcb..7a6ea10 100644
    --- a/include/asm-parisc/bitops.h
    +++ b/include/asm-parisc/bitops.h
    @@ -210,6 +210,7 @@ static __inline__ int fls(int x)
    return ret;
    }

    +#include
    #include
    #include
    #include
    diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
    index a99a749..897eade 100644
    --- a/include/asm-powerpc/bitops.h
    +++ b/include/asm-powerpc/bitops.h
    @@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
    return 32 - lz;
    }

    +static __inline__ unsigned long __fls(unsigned long x)
    +{
    + return __ilog2(x);
    +}
    +
    /*
    * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
    * instruction; for 32-bit we use the generic version, which does two
    diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
    index 965394e..b4eb24a 100644
    --- a/include/asm-s390/bitops.h
    +++ b/include/asm-s390/bitops.h
    @@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
    }

    #include
    +#include
    #include

    #include
    diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
    index b6ba5a6..d7d382f 100644
    --- a/include/asm-sh/bitops.h
    +++ b/include/asm-sh/bitops.h
    @@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word)
    #include
    #include
    #include
    +#include
    #include

    #endif /* __KERNEL__ */
    diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
    index 982ce89..11f9d81 100644
    --- a/include/asm-sparc64/bitops.h
    +++ b/include/asm-sparc64/bitops.h
    @@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
    #include
    #include
    #include
    +#include
    #include

    #ifdef __KERNEL__
    diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
    index 1ae7b27..b81a4d4 100644
    --- a/include/asm-x86/bitops.h
    +++ b/include/asm-x86/bitops.h
    @@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
    */
    static inline void __set_bit(int nr, volatile void *addr)
    {
    - asm volatile("bts %1,%0"
    - : ADDR
    - : "Ir" (nr) : "memory");
    + asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
    }

    -
    /**
    * clear_bit - Clears a bit in memory
    * @nr: Bit to clear
    @@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
    static int test_bit(int nr, const volatile unsigned long *addr);
    #endif

    -#define test_bit(nr,addr) \
    - (__builtin_constant_p(nr) ? \
    - constant_test_bit((nr),(addr)) : \
    - variable_test_bit((nr),(addr)))
    +#define test_bit(nr, addr) \
    + (__builtin_constant_p((nr)) \
    + ? constant_test_bit((nr), (addr)) \
    + : variable_test_bit((nr), (addr)))
    +
    +/**
    + * __ffs - find first set bit in word
    + * @word: The word to search
    + *
    + * Undefined if no bit exists, so code should check against 0 first.
    + */
    +static inline unsigned long __ffs(unsigned long word)
    +{
    + asm("bsf %1,%0"
    + : "=r" (word)
    + : "rm" (word));
    + return word;
    +}
    +
    +/**
    + * ffz - find first zero bit in word
    + * @word: The word to search
    + *
    + * Undefined if no zero exists, so code should check against ~0UL first.
    + */
    +static inline unsigned long ffz(unsigned long word)
    +{
    + asm("bsf %1,%0"
    + : "=r" (word)
    + : "r" (~word));
    + return word;
    +}
    +
    +/*
    + * __fls: find last set bit in word
    + * @word: The word to search
    + *
    + * Undefined if no zero exists, so code should check against ~0UL first.
    + */
    +static inline unsigned long __fls(unsigned long word)
    +{
    + asm("bsr %1,%0"
    + : "=r" (word)
    + : "rm" (word));
    + return word;
    +}
    +
    +#ifdef __KERNEL__
    +/**
    + * ffs - find first set bit in word
    + * @x: the word to search
    + *
    + * This is defined the same way as the libc and compiler builtin ffs
    + * routines, therefore differs in spirit from the other bitops.
    + *
    + * ffs(value) returns 0 if value is 0 or the position of the first
    + * set bit if value is nonzero. The first (least significant) bit
    + * is at position 1.
    + */
    +static inline int ffs(int x)
    +{
    + int r;
    +#ifdef CONFIG_X86_CMOV
    + asm("bsfl %1,%0\n\t"
    + "cmovzl %2,%0"
    + : "=r" (r) : "rm" (x), "r" (-1));
    +#else
    + asm("bsfl %1,%0\n\t"
    + "jnz 1f\n\t"
    + "movl $-1,%0\n"
    + "1:" : "=r" (r) : "rm" (x));
    +#endif
    + return r + 1;
    +}
    +
    +/**
    + * fls - find last set bit in word
    + * @x: the word to search
    + *
    + * This is defined in a similar way as the libc and compiler builtin
    + * ffs, but returns the position of the most significant set bit.
    + *
    + * fls(value) returns 0 if value is 0 or the position of the last
    + * set bit if value is nonzero. The last (most significant) bit is
    + * at position 32.
    + */
    +static inline int fls(int x)
    +{
    + int r;
    +#ifdef CONFIG_X86_CMOV
    + asm("bsrl %1,%0\n\t"
    + "cmovzl %2,%0"
    + : "=&r" (r) : "rm" (x), "rm" (-1));
    +#else
    + asm("bsrl %1,%0\n\t"
    + "jnz 1f\n\t"
    + "movl $-1,%0\n"
    + "1:" : "=r" (r) : "rm" (x));
    +#endif
    + return r + 1;
    +}
    +#endif /* __KERNEL__ */

    #undef BASE_ADDR
    #undef BIT_ADDR
    #undef ADDR

    -#ifdef CONFIG_X86_32
    -# include "bitops_32.h"
    -#else
    -# include "bitops_64.h"
    -#endif
    +static inline void set_bit_string(unsigned long *bitmap,
    + unsigned long i, int len)
    +{
    + unsigned long end = i + len;
    + while (i < end) {
    + __set_bit(i, bitmap);
    + i++;
    + }
    +}
    +
    +#ifdef __KERNEL__
    +
    +#include
    +
    +#define ARCH_HAS_FAST_MULTIPLIER 1
    +
    +#include
    +
    +#endif /* __KERNEL__ */
    +
    +#include
    +
    +#ifdef __KERNEL__
    +
    +#include
    +
    +#define ext2_set_bit_atomic(lock, nr, addr) \
    + test_and_set_bit((nr), (unsigned long *)(addr))
    +#define ext2_clear_bit_atomic(lock, nr, addr) \
    + test_and_clear_bit((nr), (unsigned long *)(addr))
    +
    +#include

    +#endif /* __KERNEL__ */
    #endif /* _ASM_X86_BITOPS_H */
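
    To keep the two families of helpers straight: __ffs()/__fls() above
    return 0-based bit indices and are undefined for a zero word, while
    ffs()/fls() are 1-based and return 0 when no bit is set. A rough
    user-space analogue of those conventions (hypothetical my_* names,
    assuming GCC-style __builtin_ctzl/__builtin_clzl; this is not the
    kernel asm, just a sketch of the return values):

        #include <assert.h>

        static unsigned long my__ffs(unsigned long w)
        {
                return __builtin_ctzl(w);                       /* undefined for w == 0 */
        }

        static unsigned long my__fls(unsigned long w)
        {
                return 8 * sizeof(w) - 1 - __builtin_clzl(w);   /* undefined for w == 0 */
        }

        static int my_ffs(int x)
        {
                return x ? (int)my__ffs((unsigned int)x) + 1 : 0;
        }

        static int my_fls(int x)
        {
                return x ? (int)my__fls((unsigned int)x) + 1 : 0;
        }

        int main(void)
        {
                assert(my__ffs(0x90) == 4 && my__fls(0x90) == 7);       /* 0-based indices */
                assert(my_ffs(0x90) == 5 && my_fls(0x90) == 8);         /* 1-based positions */
                assert(my_ffs(0) == 0 && my_fls(0) == 0);               /* 0 means "no bit set" */
                return 0;
        }

    The kernel versions get the same effect with bsf/bsr plus a cmov (or a
    branch when CONFIG_X86_CMOV is not set), which is why the X86_CMOV
    dependency earlier in the series gains an X86_64 clause.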
    diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
    deleted file mode 100644
    index 2513a81..0000000
    --- a/include/asm-x86/bitops_32.h
    +++ /dev/null
    @@ -1,166 +0,0 @@
    -#ifndef _I386_BITOPS_H
    -#define _I386_BITOPS_H
    -
    -/*
    - * Copyright 1992, Linus Torvalds.
    - */
    -
    -/**
    - * find_first_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit number of the first zero bit, not the number of the byte
    - * containing a bit.
    - */
    -static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
    -{
    - int d0, d1, d2;
    - int res;
    -
    - if (!size)
    - return 0;
    - /* This looks at memory.
    - * Mark it volatile to tell gcc not to move it around
    - */
    - asm volatile("movl $-1,%%eax\n\t"
    - "xorl %%edx,%%edx\n\t"
    - "repe; scasl\n\t"
    - "je 1f\n\t"
    - "xorl -4(%%edi),%%eax\n\t"
    - "subl $4,%%edi\n\t"
    - "bsfl %%eax,%%edx\n"
    - "1:\tsubl %%ebx,%%edi\n\t"
    - "shll $3,%%edi\n\t"
    - "addl %%edi,%%edx"
    - : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
    - : "1" ((size + 31) >> 5), "2" (addr),
    - "b" (addr) : "memory");
    - return res;
    -}
    -
    -/**
    - * find_next_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bit number to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_zero_bit(const unsigned long *addr, int size, int offset);
    -
    -/**
    - * __ffs - find first bit in word.
    - * @word: The word to search
    - *
    - * Undefined if no bit exists, so code should check against 0 first.
    - */
    -static inline unsigned long __ffs(unsigned long word)
    -{
    - __asm__("bsfl %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -/**
    - * find_first_bit - find the first set bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit number of the first set bit, not the number of the byte
    - * containing a bit.
    - */
    -static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
    -{
    - unsigned x = 0;
    -
    - while (x < size) {
    - unsigned long val = *addr++;
    - if (val)
    - return __ffs(val) + x;
    - x += sizeof(*addr) << 3;
    - }
    - return x;
    -}
    -
    -/**
    - * find_next_bit - find the first set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bit number to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_bit(const unsigned long *addr, int size, int offset);
    -
    -/**
    - * ffz - find first zero in word.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long ffz(unsigned long word)
    -{
    - __asm__("bsfl %1,%0"
    - :"=r" (word)
    - :"r" (~word));
    - return word;
    -}
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -/**
    - * ffs - find first bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as
    - * the libc and compiler builtin ffs routines, therefore
    - * differs in spirit from the above ffz() (man ffs).
    - */
    -static inline int ffs(int x)
    -{
    - int r;
    -
    - __asm__("bsfl %1,%0\n\t"
    - "jnz 1f\n\t"
    - "movl $-1,%0\n"
    - "1:" : "=r" (r) : "rm" (x));
    - return r+1;
    -}
    -
    -/**
    - * fls - find last bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as ffs().
    - */
    -static inline int fls(int x)
    -{
    - int r;
    -
    - __asm__("bsrl %1,%0\n\t"
    - "jnz 1f\n\t"
    - "movl $-1,%0\n"
    - "1:" : "=r" (r) : "rm" (x));
    - return r+1;
    -}
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#include
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -#define ext2_set_bit_atomic(lock, nr, addr) \
    - test_and_set_bit((nr), (unsigned long *)(addr))
    -#define ext2_clear_bit_atomic(lock, nr, addr) \
    - test_and_clear_bit((nr), (unsigned long *)(addr))
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#endif /* _I386_BITOPS_H */
    diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
    deleted file mode 100644
    index 365f820..0000000
    --- a/include/asm-x86/bitops_64.h
    +++ /dev/null
    @@ -1,162 +0,0 @@
    -#ifndef _X86_64_BITOPS_H
    -#define _X86_64_BITOPS_H
    -
    -/*
    - * Copyright 1992, Linus Torvalds.
    - */
    -
    -extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
    -extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
    -extern long find_first_bit(const unsigned long *addr, unsigned long size);
    -extern long find_next_bit(const unsigned long *addr, long size, long offset);
    -
    -/* return index of first bet set in val or max when no bit is set */
    -static inline long __scanbit(unsigned long val, unsigned long max)
    -{
    - asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
    - return val;
    -}
    -
    -#define find_next_bit(addr,size,off) \
    -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
    - ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
    - find_next_bit(addr,size,off)))
    -
    -#define find_next_zero_bit(addr,size,off) \
    -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
    - ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
    - find_next_zero_bit(addr,size,off)))
    -
    -#define find_first_bit(addr, size) \
    - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
    - ? (__scanbit(*(unsigned long *)(addr), (size))) \
    - : find_first_bit((addr), (size))))
    -
    -#define find_first_zero_bit(addr, size) \
    - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
    - ? (__scanbit(~*(unsigned long *)(addr), (size))) \
    - : find_first_zero_bit((addr), (size))))
    -
    -static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
    - int len)
    -{
    - unsigned long end = i + len;
    - while (i < end) {
    - __set_bit(i, bitmap);
    - i++;
    - }
    -}
    -
    -/**
    - * ffz - find first zero in word.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long ffz(unsigned long word)
    -{
    - __asm__("bsfq %1,%0"
    - :"=r" (word)
    - :"r" (~word));
    - return word;
    -}
    -
    -/**
    - * __ffs - find first bit in word.
    - * @word: The word to search
    - *
    - * Undefined if no bit exists, so code should check against 0 first.
    - */
    -static inline unsigned long __ffs(unsigned long word)
    -{
    - __asm__("bsfq %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -/*
    - * __fls: find last bit set.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long __fls(unsigned long word)
    -{
    - __asm__("bsrq %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -/**
    - * ffs - find first bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as
    - * the libc and compiler builtin ffs routines, therefore
    - * differs in spirit from the above ffz (man ffs).
    - */
    -static inline int ffs(int x)
    -{
    - int r;
    -
    - __asm__("bsfl %1,%0\n\t"
    - "cmovzl %2,%0"
    - : "=r" (r) : "rm" (x), "r" (-1));
    - return r+1;
    -}
    -
    -/**
    - * fls64 - find last bit set in 64 bit word
    - * @x: the word to search
    - *
    - * This is defined the same way as fls.
    - */
    -static inline int fls64(__u64 x)
    -{
    - if (x == 0)
    - return 0;
    - return __fls(x) + 1;
    -}
    -
    -/**
    - * fls - find last bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as ffs.
    - */
    -static inline int fls(int x)
    -{
    - int r;
    -
    - __asm__("bsrl %1,%0\n\t"
    - "cmovzl %2,%0"
    - : "=&r" (r) : "rm" (x), "rm" (-1));
    - return r+1;
    -}
    -
    -#define ARCH_HAS_FAST_MULTIPLIER 1
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -#define ext2_set_bit_atomic(lock, nr, addr) \
    - test_and_set_bit((nr), (unsigned long *)(addr))
    -#define ext2_clear_bit_atomic(lock, nr, addr) \
    - test_and_clear_bit((nr), (unsigned long *)(addr))
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#endif /* _X86_64_BITOPS_H */
    diff --git a/include/linux/bitops.h b/include/linux/bitops.h
    index 40d5473..48bde60 100644
    --- a/include/linux/bitops.h
    +++ b/include/linux/bitops.h
    @@ -112,4 +112,144 @@ static inline unsigned fls_long(unsigned long l)
    return fls64(l);
    }

    +#ifdef __KERNEL__
    +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
    +extern unsigned long __find_first_bit(const unsigned long *addr,
    + unsigned long size);
    +
    +/**
    + * find_first_bit - find the first set bit in a memory region
    + * @addr: The address to start the search at
    + * @size: The maximum size to search
    + *
    + * Returns the bit number of the first set bit.
    + */
    +static __always_inline unsigned long
    +find_first_bit(const unsigned long *addr, unsigned long size)
    +{
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG))
    + return __ffs((*addr) | (1ul << size));
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
    + return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr);
    +
    + /* size is not constant or too big */
    + return __find_first_bit(addr, size);
    +}
    +
    +extern unsigned long __find_first_zero_bit(const unsigned long *addr,
    + unsigned long size);
    +
    +/**
    + * find_first_zero_bit - find the first cleared bit in a memory region
    + * @addr: The address to start the search at
    + * @size: The maximum size to search
    + *
    + * Returns the bit number of the first cleared bit.
    + */
    +static __always_inline unsigned long
    +find_first_zero_bit(const unsigned long *addr, unsigned long size)
    +{
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + return __ffs(~(*addr) | (1ul << size));
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
    + return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr));
    +
    + /* size is not constant or too big */
    + return __find_first_zero_bit(addr, size);
    +}
    +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
    +
    +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
    +extern unsigned long __find_next_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset);
    +
    +/**
    + * find_next_bit - find the next set bit in a memory region
    + * @addr: The address to base the search on
    + * @offset: The bitnumber to start searching at
    + * @size: The bitmap size in bits
    + */
    +static __always_inline unsigned long
    +find_next_bit(const unsigned long *addr, unsigned long size,
    + unsigned long offset)
    +{
    + unsigned long value;
    +
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + value = (*addr) & ((~0ul) << offset);
    + value |= (1ul << size);
    + return __ffs(value);
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
    + value = (*addr) & ((~0ul) << offset);
    + return (value == 0) ? BITS_PER_LONG : __ffs(value);
    + }
    +
    + /* size is not constant or too big */
    + return __find_next_bit(addr, size, offset);
    +}
    +
    +extern unsigned long __find_next_zero_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset);
    +
    +/**
    + * find_next_zero_bit - find the next cleared bit in a memory region
    + * @addr: The address to base the search on
    + * @offset: The bitnumber to start searching at
    + * @size: The bitmap size in bits
    + */
    +static __always_inline unsigned long
    +find_next_zero_bit(const unsigned long *addr, unsigned long size,
    + unsigned long offset)
    +{
    + unsigned long value;
    +
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + value = (~(*addr)) & ((~0ul) << offset);
    + value |= (1ul << size);
    + return __ffs(value);
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
    + value = (~(*addr)) & ((~0ul) << offset);
    + return (value == 0) ? BITS_PER_LONG : __ffs(value);
    + }
    +
    + /* size is not constant or too big */
    + return __find_next_zero_bit(addr, size, offset);
    +}
    +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
    +#endif /* __KERNEL__ */
    #endif
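
    The constant-size fast paths above all rely on the same sentinel
    trick: for size < BITS_PER_LONG a bit is OR-ed in at position 'size',
    so __ffs() never sees a zero word and an empty bitmap yields 'size',
    i.e. "not found". A small standalone sketch of the idea
    (first_bit_small is a hypothetical helper using a GCC builtin, not
    the kernel inline itself):

        #include <assert.h>

        /* Valid only for size < BITS_PER_LONG, like the fast path above. */
        static unsigned long first_bit_small(unsigned long word, unsigned long size)
        {
                return __builtin_ctzl(word | (1ul << size));    /* sentinel at 'size' */
        }

        int main(void)
        {
                assert(first_bit_small(0x00, 9) == 9);  /* empty bitmap -> size ("not found") */
                assert(first_bit_small(0x40, 9) == 6);  /* bit 6 is the first set bit */
                return 0;
        }

    The size == BITS_PER_LONG case cannot use a sentinel (the shift would
    fall outside the word), which is why it is handled with an explicit
    zero check instead.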
    diff --git a/lib/Kconfig b/lib/Kconfig
    index 2d53dc0..8cc8e87 100644
    --- a/lib/Kconfig
    +++ b/lib/Kconfig
    @@ -7,6 +7,12 @@ menu "Library routines"
    config BITREVERSE
    tristate

    +config GENERIC_FIND_FIRST_BIT
    + def_bool n
    +
    +config GENERIC_FIND_NEXT_BIT
    + def_bool n
    +
    config CRC_CCITT
    tristate "CRC-CCITT functions"
    help
    diff --git a/lib/Makefile b/lib/Makefile
    index bf8000f..2d7001b 100644
    --- a/lib/Makefile
    +++ b/lib/Makefile
    @@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
    obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
    lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
    lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
    +lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
    lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
    obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
    obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
    diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
    index 78ccd73..d3f5784 100644
    --- a/lib/find_next_bit.c
    +++ b/lib/find_next_bit.c
    @@ -16,14 +16,12 @@

    #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

    -/**
    - * find_next_bit - find the next set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
    +/*
    + * Find the next set bit in a memory region.
    */
    -unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
    - unsigned long offset)
    +unsigned long __find_next_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset)
    {
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    @@ -60,15 +58,14 @@ found_first:
    found_middle:
    return result + __ffs(tmp);
    }
    -
    -EXPORT_SYMBOL(find_next_bit);
    +EXPORT_SYMBOL(__find_next_bit);

    /*
    * This implementation of find_{first,next}_zero_bit was stolen from
    * Linus' asm-alpha/bitops.h.
    */
    -unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
    - unsigned long offset)
    +unsigned long __find_next_zero_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset)
    {
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    @@ -105,8 +102,64 @@ found_first:
    found_middle:
    return result + ffz(tmp);
    }
    +EXPORT_SYMBOL(__find_next_zero_bit);
    +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
    +
    +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
    +/*
    + * Find the first set bit in a memory region.
    + */
    +unsigned long __find_first_bit(const unsigned long *addr,
    + unsigned long size)
    +{
    + const unsigned long *p = addr;
    + unsigned long result = 0;
    + unsigned long tmp;

    -EXPORT_SYMBOL(find_next_zero_bit);
    + while (size & ~(BITS_PER_LONG-1)) {
    + if ((tmp = *(p++)))
    + goto found;
    + result += BITS_PER_LONG;
    + size -= BITS_PER_LONG;
    + }
    + if (!size)
    + return result;
    +
    + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
    + if (tmp == 0UL) /* Are any bits set? */
    + return result + size; /* Nope. */
    +found:
    + return result + __ffs(tmp);
    +}
    +EXPORT_SYMBOL(__find_first_bit);
    +
    +/*
    + * Find the first cleared bit in a memory region.
    + */
    +unsigned long __find_first_zero_bit(const unsigned long *addr,
    + unsigned long size)
    +{
    + const unsigned long *p = addr;
    + unsigned long result = 0;
    + unsigned long tmp;
    +
    + while (size & ~(BITS_PER_LONG-1)) {
    + if (~(tmp = *(p++)))
    + goto found;
    + result += BITS_PER_LONG;
    + size -= BITS_PER_LONG;
    + }
    + if (!size)
    + return result;
    +
    + tmp = (*p) | (~0UL << size);
    + if (tmp == ~0UL) /* Are any bits zero? */
    + return result + size; /* Nope. */
    +found:
    + return result + ffz(tmp);
    +}
    +EXPORT_SYMBOL(__find_first_zero_bit);
    +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

    #ifdef __BIG_ENDIAN


  6. Re: [git pull] generic bitops, take 2



    On Sat, 26 Apr 2008, Ingo Molnar wrote:
    >
    > i've added Alexander's patch that does the cleanup suggested by you


    Well.. Not really:

    > diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    > index 3fbe69e..7a75043 100644
    > --- a/arch/um/Kconfig.x86_64
    > +++ b/arch/um/Kconfig.x86_64
    > @@ -27,6 +27,14 @@ config SMP_BROKEN
    > bool
    > default y
    >
    > +config GENERIC_FIND_FIRST_BIT
    > + bool
    > + default y
    > +
    > +config GENERIC_FIND_NEXT_BIT
    > + bool
    > + default y
    > +


    It still declares this GENERIC_FIND_*_BIT thing separately for UM.

    Yes, that may _work_, but it's wrong to define it in two different places.

    It also makes me wonder why Kconfig.i386 can just include
    arch/x86/Kconfig.cpu, but x86_64 cannot?

    Linus

  7. Re: [git pull] generic bitops, take 2


    * Ingo Molnar wrote:

    > perhaps the combination of distcc and cross-compilers doesn't work well
    > on UML? (it works fine with other architectures)


    so i tried a third build method, gcc 3.4, and that built it fine but UML
    crashed during bootup:

    dione:~/linux/linux-x86.q> ./linux
    Core dump limits :
    soft - 0
    hard - NONE
    Checking that ptrace can change system call numbers...OK
    Checking syscall emulation patch for ptrace...OK
    Checking advanced syscall emulation patch for ptrace...OK
    Checking for tmpfs mount on /dev/shm...OK
    Checking PROT_EXEC mmap in /dev/shm/...OK
    Checking for the skas3 patch in the host:
    - /proc/mm...not found: No such file or directory
    - PTRACE_FAULTINFO...not found
    - PTRACE_LDT...not found
    UML running in SKAS0 mode
    Segmentation fault

    (gdb) bt
    #0 0x000000006020fd92 in txInit ()
    #1 0x000000006000a4c1 in init_jfs_fs ()
    #2 0x0000000060001794 in do_initcalls ()
    #3 0x000000006000196d in do_basic_setup ()
    #4 0x0000000060001a14 in kernel_init ()
    #5 0x0000000060028427 in run_kernel_thread ()
    #6 0x000000006001d020 in new_thread_handler ()
    #7 0x0000000000000000 in ?? ()

    rip 0x6020fd92 0x6020fd92

    txInit is in JFS.

    but at least i could build UML. (this is all with pristine -git)

    So i checked the patch below. And it built fine, and boo^H^Hcrashed just
    like it did before:

    - PTRACE_FAULTINFO...not found
    - PTRACE_LDT...not found
    UML running in SKAS0 mode
    Segmentation fault

    so i guess this is the way to go?

    Ingo

    ---------------------->
    Subject: uml: kconfig cleanup
    From: Ingo Molnar
    Date: Sat Apr 26 19:10:17 CEST 2008

    Signed-off-by: Ingo Molnar
    ---
    arch/um/Kconfig.x86_64 | 7 +++++++
    1 file changed, 7 insertions(+)

    Index: linux-x86.q/arch/um/Kconfig.x86_64
    ===================================================================
    --- linux-x86.q.orig/arch/um/Kconfig.x86_64
    +++ linux-x86.q/arch/um/Kconfig.x86_64
    @@ -1,3 +1,10 @@
    +
    +menu "Host processor type and features"
    +
    +source "arch/x86/Kconfig.cpu"
    +
    +endmenu
    +
    config UML_X86
    bool
    default y

  8. Re: [git pull] generic bitops, take 2


    * Linus Torvalds wrote:

    > On Sat, 26 Apr 2008, Ingo Molnar wrote:
    > >
    > > i've added Alexander's patch that does the cleanup suggested by you

    >
    > Well.. Not really:
    >
    > > diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    > > index 3fbe69e..7a75043 100644
    > > --- a/arch/um/Kconfig.x86_64
    > > +++ b/arch/um/Kconfig.x86_64
    > > @@ -27,6 +27,14 @@ config SMP_BROKEN
    > > bool
    > > default y
    > >
    > > +config GENERIC_FIND_FIRST_BIT
    > > + bool
    > > + default y
    > > +
    > > +config GENERIC_FIND_NEXT_BIT
    > > + bool
    > > + default y
    > > +

    >
    > It still declares this GENERIC_FIND_*_BIT thing separately for UM.
    >
    > Yes, that may _work_, but it's wrong to define it in two different places.
    >
    > It also makes me wonder why Kconfig.i386 can just include
    > arch/x86/Kconfig.cpu, but x86_64 cannot?


    hm, indeed arch/um/Kconfig.i386 is asymmetric to Kconfig.x86_64. Jeff
    Cc:-ed.

    trying to see whether changing that would still result in a working UML
    arch, i found that current UML doesn't seem to build here:

    arch/um/os-Linux/helper.c: In function 'run_helper':
    arch/um/os-Linux/helper.c:73: error: 'PATH_MAX' undeclared (first use in this function)

    it needs the patch below.

    then it fails with:

    mm/filemap.c: In function '__generic_file_aio_write_nolock':
    mm/filemap.c:1831: sorry, unimplemented: inlining failed in call to 'generic_write_checks': function body not available

    i used:

    make ARCH=um SUBARCH=x86_64 -j64 linux

    on x86-64, gcc-4.3.0-0.13. Config is:

    http://redhat.com/~mingo/misc/.config.um

    i also tried gcc 4.2.3; that built it fine but didn't link it:

    /opt/crosstool/gcc-4.2.3-glibc-2.3.6/x86_64-unknown-linux-gnu/lib/gcc/x86_64-unknown-linux-gnu/4.2.3/../../../../x86_64-unknown-linux-gnu/bin/ld:
    warning: ld-linux-x86-64.so.2, needed by
    /opt/crosstool/gcc-4.2.3-glibc-2.3.6/x86_64-unknown-linux-gnu/x86_64-unknown-linux-gnu/sys-root/lib/../lib64/libc.so.6,
    not found (try using -rpath or -rpath-link)
    /opt/crosstool/gcc-4.2.3-glibc-2.3.6/x86_64-unknown-linux-gnu/x86_64-unknown-linux-gnu/sys-root/lib/../lib64/libc.so.6:
    undefined reference to `_dl_argv@GLIBC_PRIVATE'

    ....
    /opt/crosstool/gcc-4.2.3-glibc-2.3.6/x86_64-unknown-linux-gnu/lib/gcc/x86_64-unknown-linux-gnu/4.2.3/../../../../x86_64-unknown-linux-gnu/bin/ld:
    warning: .fini_array section has zero size collect2: ld returned 1 exit
    status
    distcc[29125] ERROR: compile (null) on localhost failed
    KSYM .tmp_kallsyms1.S
    /opt/crosstool/gcc-4.2.3-glibc-2.3.6/x86_64-unknown-linux-gnu/bin/x86_64-unknown-linux-gnu-nm:
    '.tmp_vmlinux1': No such file

    perhaps the combination of distcc and cross-compilers doesn't work well on
    UML? (it works fine with other architectures)

    Ingo

    --------------->
    Subject: uml: fix
    From: Ingo Molnar
    Date: Sat Apr 26 18:59:42 CEST 2008

    Signed-off-by: Ingo Molnar
    ---
    arch/um/os-Linux/helper.c | 1 +
    1 file changed, 1 insertion(+)

    Index: linux-x86.q/arch/um/os-Linux/helper.c
    ===================================================================
    --- linux-x86.q.orig/arch/um/os-Linux/helper.c
    +++ linux-x86.q/arch/um/os-Linux/helper.c
    @@ -14,6 +14,7 @@
    #include "os.h"
    #include "um_malloc.h"
    #include "user.h"
    +#include

    struct helper_data {
    void (*pre_exec)(void*);

  9. Re: [git pull] generic bitops, take 2

    On Sat, Apr 26, 2008 at 10:03:31AM -0700, Linus Torvalds wrote:
    >
    >
    > On Sat, 26 Apr 2008, Ingo Molnar wrote:
    > >
    > > i've added Alexander's patch that does the cleanup suggested by you

    >
    > Well.. Not really:
    >
    > > diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    > > index 3fbe69e..7a75043 100644
    > > --- a/arch/um/Kconfig.x86_64
    > > +++ b/arch/um/Kconfig.x86_64
    > > @@ -27,6 +27,14 @@ config SMP_BROKEN
    > > bool
    > > default y
    > >
    > > +config GENERIC_FIND_FIRST_BIT
    > > + bool
    > > + default y
    > > +
    > > +config GENERIC_FIND_NEXT_BIT
    > > + bool
    > > + default y
    > > +

    >
    > It still declares this GENERIC_FIND_*_BIT thing separately for UM.


    It would be nice if this could follow the example outlined
    in Documentation/kbuild/kconfig-language.txt.

    See the section about the HAVE_* pattern.

    Basically we should define the config symbol only once across all
    the Kconfig files, and wherever we then want to use this functionality
    we should select the config symbol.

    Sam

  10. Re: [git pull] generic bitops, take 2

    On Sat, Apr 26, 2008 at 07:22:22PM +0200, Ingo Molnar wrote:
    >
    > * Linus Torvalds wrote:
    >
    > > On Sat, 26 Apr 2008, Ingo Molnar wrote:
    > > >
    > > > i've added Alexander's patch that does the cleanup suggested by you

    > >
    > > Well.. Not really:
    > >
    > > > diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    > > > index 3fbe69e..7a75043 100644
    > > > --- a/arch/um/Kconfig.x86_64
    > > > +++ b/arch/um/Kconfig.x86_64
    > > > @@ -27,6 +27,14 @@ config SMP_BROKEN
    > > > bool
    > > > default y
    > > >
    > > > +config GENERIC_FIND_FIRST_BIT
    > > > + bool
    > > > + default y
    > > > +
    > > > +config GENERIC_FIND_NEXT_BIT
    > > > + bool
    > > > + default y
    > > > +

    > >
    > > It still declares this GENERIC_FIND_*_BIT thing separately for UM.
    > >
    > > Yes, that may _work_, but it's wrong to define it in two different places.
    > >
    > > It also makes me wonder why Kconfig.i386 can just include
    > > arch/x86/Kconfig.cpu, but x86_64 cannot?

    >
    > hm, indeed arch/um/Kconfig.i386 is asymmetric to Kconfig.x86_64. Jeff
    > Cc:-ed.


    I may be able to explain why...
    Before the x86 merge we had an arch/i386/Kconfig.cpu but we did
    not have any arch/x86_64/Kconfig.cpu.
    When we merged i386 and x86_64 we included the 64-bit stuff in
    arch/x86/Kconfig.cpu, and thus it became used by both 32-bit and
    64-bit x86. But during this process we did not do the proper
    unification of um - so here we live with the old style.
    Blame the one who did the Kconfig unification...

    I would assume um should do a similar unification and get rid of the
    i386/x86_64 split (but I have not looked at doing it).

    Sam

  11. [git pull] generic bitops, take 3


    * Ingo Molnar wrote:

    > So i checked the patch below. [...]


    > so i guess this is the way to go?


    i think it is. I just put these fixes into the bitops tree and 64-bit
    UML seems unaffected and properly picks up these symbols from the x86
    Kconfig.cpu file. I pushed out the v3 tree to:

    git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-generic-bitops-v3.git for-linus

    find the shortlog and diff below.

    this is against your very latest tree (which i rebased to in order to
    pick up the VFS fix). 5 successful build+bootup tests were done with
    this fresh tree:

    #0, Sat_Apr_26_19_35_34_CEST_2008
    #1, Sat_Apr_26_19_37_02_CEST_2008
    #2, Sat_Apr_26_19_38_48_CEST_2008
    #3, Sat_Apr_26_19_41_36_CEST_2008
    #4, Sat_Apr_26_19_43_28_CEST_2008

    on x86, mixed 32-bit/64-bit.

    Ingo

    ------------------>
    Alexander van Heukelum (13):
    x86: change x86 to use generic find_next_bit
    x86, uml: fix uml with generic find_next_bit for x86
    x86, generic: optimize find_next_(zero_)bit for small constant-size bitmaps
    x86: merge the simple bitops and move them to bitops.h
    generic: introduce a generic __fls implementation
    generic: implement __fls on all 64-bit archs
    bitops: use __fls for fls64 on 64-bit archs
    x86: generic versions of find_first_(zero_)bit, convert i386
    x86: switch 64-bit to generic find_first_bit
    x86: optimize find_first_bit for small bitmaps
    x86, UML: remove x86-specific implementations of find_first_bit
    x86: finalize bitops unification
    x86, bitops: select the generic bitmap search functions

    Ingo Molnar (2):
    uml: fix build error
    uml: Kconfig cleanup

    Joe Perches (1):
    x86: include/asm-x86/pgalloc.h/bitops.h: checkpatch cleanups - formatting only

    arch/um/Kconfig.x86_64 | 7 ++
    arch/um/os-Linux/helper.c | 1 +
    arch/um/sys-i386/Makefile | 2 +-
    arch/um/sys-x86_64/Makefile | 2 +-
    arch/x86/Kconfig.cpu | 7 ++-
    arch/x86/lib/Makefile | 3 +-
    arch/x86/lib/bitops_32.c | 70 --------------
    arch/x86/lib/bitops_64.c | 175 ------------------------------------
    include/asm-alpha/bitops.h | 5 +
    include/asm-generic/bitops/__fls.h | 43 +++++++++
    include/asm-generic/bitops/find.h | 2 +
    include/asm-generic/bitops/fls64.h | 22 +++++
    include/asm-ia64/bitops.h | 16 ++++
    include/asm-mips/bitops.h | 5 +
    include/asm-parisc/bitops.h | 1 +
    include/asm-powerpc/bitops.h | 5 +
    include/asm-s390/bitops.h | 1 +
    include/asm-sh/bitops.h | 1 +
    include/asm-sparc64/bitops.h | 1 +
    include/asm-x86/bitops.h | 149 ++++++++++++++++++++++++++++---
    include/asm-x86/bitops_32.h | 166 ----------------------------------
    include/asm-x86/bitops_64.h | 162 ---------------------------------
    include/linux/bitops.h | 140 ++++++++++++++++++++++++++++
    lib/Kconfig | 6 ++
    lib/Makefile | 1 +
    lib/find_next_bit.c | 77 +++++++++++++---
    26 files changed, 467 insertions(+), 603 deletions(-)
    delete mode 100644 arch/x86/lib/bitops_32.c
    delete mode 100644 arch/x86/lib/bitops_64.c
    create mode 100644 include/asm-generic/bitops/__fls.h
    delete mode 100644 include/asm-x86/bitops_32.h
    delete mode 100644 include/asm-x86/bitops_64.h

    diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
    index 3fbe69e..5696e7b 100644
    --- a/arch/um/Kconfig.x86_64
    +++ b/arch/um/Kconfig.x86_64
    @@ -1,3 +1,10 @@
    +
    +menu "Host processor type and features"
    +
    +source "arch/x86/Kconfig.cpu"
    +
    +endmenu
    +
    config UML_X86
    bool
    default y
    diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
    index f4bd349..f25c29a 100644
    --- a/arch/um/os-Linux/helper.c
    +++ b/arch/um/os-Linux/helper.c
    @@ -14,6 +14,7 @@
    #include "os.h"
    #include "um_malloc.h"
    #include "user.h"
    +#include

    struct helper_data {
    void (*pre_exec)(void*);
    diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
    index 964dc1a..598b5c1 100644
    --- a/arch/um/sys-i386/Makefile
    +++ b/arch/um/sys-i386/Makefile
    @@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
    ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
    sys_call_table.o tls.o

    -subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o
    +subarch-obj-y = lib/semaphore_32.o lib/string_32.o
    subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
    subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o

    diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
    index 3c22de5..c8b4cce 100644
    --- a/arch/um/sys-x86_64/Makefile
    +++ b/arch/um/sys-x86_64/Makefile
    @@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \

    obj-$(CONFIG_MODULES) += um_module.o

    -subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
    +subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
    subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o

    ldt-y = ../sys-i386/ldt.o
    diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
    index 4da3cdb..7ef18b0 100644
    --- a/arch/x86/Kconfig.cpu
    +++ b/arch/x86/Kconfig.cpu
    @@ -278,6 +278,11 @@ config GENERIC_CPU

    endchoice

    +config X86_CPU
    + def_bool y
    + select GENERIC_FIND_FIRST_BIT
    + select GENERIC_FIND_NEXT_BIT
    +
    config X86_GENERIC
    bool "Generic x86 support"
    depends on X86_32
    @@ -398,7 +403,7 @@ config X86_TSC
    # generates cmov.
    config X86_CMOV
    def_bool y
    - depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
    + depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64)

    config X86_MINIMUM_CPU_FAMILY
    int
    diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
    index 25df1c1..76f60f5 100644
    --- a/arch/x86/lib/Makefile
    +++ b/arch/x86/lib/Makefile
    @@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o
    ifeq ($(CONFIG_X86_32),y)
    lib-y += checksum_32.o
    lib-y += strstr_32.o
    - lib-y += bitops_32.o semaphore_32.o string_32.o
    + lib-y += semaphore_32.o string_32.o

    lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
    else
    @@ -21,7 +21,6 @@ else

    lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
    lib-y += thunk_64.o clear_page_64.o copy_page_64.o
    - lib-y += bitops_64.o
    lib-y += memmove_64.o memset_64.o
    lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
    endif
    diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c
    deleted file mode 100644
    index b654404..0000000
    --- a/arch/x86/lib/bitops_32.c
    +++ /dev/null
    @@ -1,70 +0,0 @@
    -#include
    -#include
    -
    -/**
    - * find_next_bit - find the next set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_bit(const unsigned long *addr, int size, int offset)
    -{
    - const unsigned long *p = addr + (offset >> 5);
    - int set = 0, bit = offset & 31, res;
    -
    - if (bit) {
    - /*
    - * Look for nonzero in the first 32 bits:
    - */
    - __asm__("bsfl %1,%0\n\t"
    - "jne 1f\n\t"
    - "movl $32, %0\n"
    - "1:"
    - : "=r" (set)
    - : "r" (*p >> bit));
    - if (set < (32 - bit))
    - return set + offset;
    - set = 32 - bit;
    - p++;
    - }
    - /*
    - * No set bit yet, search remaining full words for a bit
    - */
    - res = find_first_bit (p, size - 32 * (p - addr));
    - return (offset + set + res);
    -}
    -EXPORT_SYMBOL(find_next_bit);
    -
    -/**
    - * find_next_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_zero_bit(const unsigned long *addr, int size, int offset)
    -{
    - const unsigned long *p = addr + (offset >> 5);
    - int set = 0, bit = offset & 31, res;
    -
    - if (bit) {
    - /*
    - * Look for zero in the first 32 bits.
    - */
    - __asm__("bsfl %1,%0\n\t"
    - "jne 1f\n\t"
    - "movl $32, %0\n"
    - "1:"
    - : "=r" (set)
    - : "r" (~(*p >> bit)));
    - if (set < (32 - bit))
    - return set + offset;
    - set = 32 - bit;
    - p++;
    - }
    - /*
    - * No zero yet, search remaining full bytes for a zero
    - */
    - res = find_first_zero_bit(p, size - 32 * (p - addr));
    - return (offset + set + res);
    -}
    -EXPORT_SYMBOL(find_next_zero_bit);
    diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c
    deleted file mode 100644
    index 0e8f491..0000000
    --- a/arch/x86/lib/bitops_64.c
    +++ /dev/null
    @@ -1,175 +0,0 @@
    -#include
    -
    -#undef find_first_zero_bit
    -#undef find_next_zero_bit
    -#undef find_first_bit
    -#undef find_next_bit
    -
    -static inline long
    -__find_first_zero_bit(const unsigned long * addr, unsigned long size)
    -{
    - long d0, d1, d2;
    - long res;
    -
    - /*
    - * We must test the size in words, not in bits, because
    - * otherwise incoming sizes in the range -63..-1 will not run
    - * any scasq instructions, and then the flags used by the je
    - * instruction will have whatever random value was in place
    - * before. Nobody should call us like that, but
    - * find_next_zero_bit() does when offset and size are at the
    - * same word and it fails to find a zero itself.
    - */
    - size += 63;
    - size >>= 6;
    - if (!size)
    - return 0;
    - asm volatile(
    - " repe; scasq\n"
    - " je 1f\n"
    - " xorq -8(%%rdi),%%rax\n"
    - " subq $8,%%rdi\n"
    - " bsfq %%rax,%%rdx\n"
    - "1: subq %[addr],%%rdi\n"
    - " shlq $3,%%rdi\n"
    - " addq %%rdi,%%rdx"
    - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
    - :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
    - [addr] "S" (addr) : "memory");
    - /*
    - * Any register would do for [addr] above, but GCC tends to
    - * prefer rbx over rsi, even though rsi is readily available
    - * and doesn't have to be saved.
    - */
    - return res;
    -}
    -
    -/**
    - * find_first_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit-number of the first zero bit, not the number of the byte
    - * containing a bit.
    - */
    -long find_first_zero_bit(const unsigned long * addr, unsigned long size)
    -{
    - return __find_first_zero_bit (addr, size);
    -}
    -
    -/**
    - * find_next_zero_bit - find the next zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -long find_next_zero_bit (const unsigned long * addr, long size, long offset)
    -{
    - const unsigned long * p = addr + (offset >> 6);
    - unsigned long set = 0;
    - unsigned long res, bit = offset&63;
    -
    - if (bit) {
    - /*
    - * Look for zero in first word
    - */
    - asm("bsfq %1,%0\n\t"
    - "cmoveq %2,%0"
    - : "=r" (set)
    - : "r" (~(*p >> bit)), "r"(64L));
    - if (set < (64 - bit))
    - return set + offset;
    - set = 64 - bit;
    - p++;
    - }
    - /*
    - * No zero yet, search remaining full words for a zero
    - */
    - res = __find_first_zero_bit (p, size - 64 * (p - addr));
    -
    - return (offset + set + res);
    -}
    -
    -static inline long
    -__find_first_bit(const unsigned long * addr, unsigned long size)
    -{
    - long d0, d1;
    - long res;
    -
    - /*
    - * We must test the size in words, not in bits, because
    - * otherwise incoming sizes in the range -63..-1 will not run
    - * any scasq instructions, and then the flags used by the jz
    - * instruction will have whatever random value was in place
    - * before. Nobody should call us like that, but
    - * find_next_bit() does when offset and size are at the same
    - * word and it fails to find a one itself.
    - */
    - size += 63;
    - size >>= 6;
    - if (!size)
    - return 0;
    - asm volatile(
    - " repe; scasq\n"
    - " jz 1f\n"
    - " subq $8,%%rdi\n"
    - " bsfq (%%rdi),%%rax\n"
    - "1: subq %[addr],%%rdi\n"
    - " shlq $3,%%rdi\n"
    - " addq %%rdi,%%rax"
    - :"=a" (res), "=&c" (d0), "=&D" (d1)
    - :"0" (0ULL), "1" (size), "2" (addr),
    - [addr] "r" (addr) : "memory");
    - return res;
    -}
    -
    -/**
    - * find_first_bit - find the first set bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit-number of the first set bit, not the number of the byte
    - * containing a bit.
    - */
    -long find_first_bit(const unsigned long * addr, unsigned long size)
    -{
    - return __find_first_bit(addr,size);
    -}
    -
    -/**
    - * find_next_bit - find the first set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    - */
    -long find_next_bit(const unsigned long * addr, long size, long offset)
    -{
    - const unsigned long * p = addr + (offset >> 6);
    - unsigned long set = 0, bit = offset & 63, res;
    -
    - if (bit) {
    - /*
    - * Look for nonzero in the first 64 bits:
    - */
    - asm("bsfq %1,%0\n\t"
    - "cmoveq %2,%0\n\t"
    - : "=r" (set)
    - : "r" (*p >> bit), "r" (64L));
    - if (set < (64 - bit))
    - return set + offset;
    - set = 64 - bit;
    - p++;
    - }
    - /*
    - * No set bit yet, search remaining full words for a bit
    - */
    - res = __find_first_bit (p, size - 64 * (p - addr));
    - return (offset + set + res);
    -}
    -
    -#include
    -
    -EXPORT_SYMBOL(find_next_bit);
    -EXPORT_SYMBOL(find_first_bit);
    -EXPORT_SYMBOL(find_first_zero_bit);
    -EXPORT_SYMBOL(find_next_zero_bit);
    diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
    index 9e19a70..15f3ae2 100644
    --- a/include/asm-alpha/bitops.h
    +++ b/include/asm-alpha/bitops.h
    @@ -388,6 +388,11 @@ static inline int fls64(unsigned long x)
    }
    #endif

    +static inline unsigned long __fls(unsigned long x)
    +{
    + return fls64(x) - 1;
    +}
    +
    static inline int fls(int x)
    {
    return fls64((unsigned int) x);
    diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
    new file mode 100644
    index 0000000..be24465
    --- /dev/null
    +++ b/include/asm-generic/bitops/__fls.h
    @@ -0,0 +1,43 @@
    +#ifndef _ASM_GENERIC_BITOPS___FLS_H_
    +#define _ASM_GENERIC_BITOPS___FLS_H_
    +
    +#include
    +
    +/**
    + * __fls - find last (most-significant) set bit in a long word
    + * @word: the word to search
    + *
    + * Undefined if no set bit exists, so code should check against 0 first.
    + */
    +static inline unsigned long __fls(unsigned long word)
    +{
    + int num = BITS_PER_LONG - 1;
    +
    +#if BITS_PER_LONG == 64
    + if (!(word & (~0ul << 32))) {
    + num -= 32;
    + word <<= 32;
    + }
    +#endif
    + if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
    + num -= 16;
    + word <<= 16;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
    + num -= 8;
    + word <<= 8;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
    + num -= 4;
    + word <<= 4;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
    + num -= 2;
    + word <<= 2;
    + }
    + if (!(word & (~0ul << (BITS_PER_LONG-1))))
    + num -= 1;
    + return num;
    +}
    +
    +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
    diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
    index 72a51e5..1914e97 100644
    --- a/include/asm-generic/bitops/find.h
    +++ b/include/asm-generic/bitops/find.h
    @@ -1,11 +1,13 @@
    #ifndef _ASM_GENERIC_BITOPS_FIND_H_
    #define _ASM_GENERIC_BITOPS_FIND_H_

    +#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
    extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
    size, unsigned long offset);

    extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
    long size, unsigned long offset);
    +#endif

    #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
    #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
    diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
    index 1b6b17c..86d403f 100644
    --- a/include/asm-generic/bitops/fls64.h
    +++ b/include/asm-generic/bitops/fls64.h
    @@ -3,6 +3,18 @@

    #include

    +/**
    + * fls64 - find last set bit in a 64-bit word
    + * @x: the word to search
    + *
    + * This is defined in a similar way as the libc and compiler builtin
    + * ffsll, but returns the position of the most significant set bit.
    + *
    + * fls64(value) returns 0 if value is 0 or the position of the last
    + * set bit if value is nonzero. The last (most significant) bit is
    + * at position 64.
    + */
    +#if BITS_PER_LONG == 32
    static inline int fls64(__u64 x)
    {
    __u32 h = x >> 32;
    @@ -10,5 +22,15 @@ static inline int fls64(__u64 x)
    return fls(h) + 32;
    return fls(x);
    }
    +#elif BITS_PER_LONG == 64
    +static inline int fls64(__u64 x)
    +{
    + if (x == 0)
    + return 0;
    + return __fls(x) + 1;
    +}
    +#else
    +#error BITS_PER_LONG not 32 or 64
    +#endif

    #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
    diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
    index 953d3df..e2ca800 100644
    --- a/include/asm-ia64/bitops.h
    +++ b/include/asm-ia64/bitops.h
    @@ -407,6 +407,22 @@ fls (int t)
    return ia64_popcnt(x);
    }

    +/*
    + * Find the last (most significant) bit set. Undefined for x==0.
    + * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
    + */
    +static inline unsigned long
    +__fls (unsigned long x)
    +{
    + x |= x >> 1;
    + x |= x >> 2;
    + x |= x >> 4;
    + x |= x >> 8;
    + x |= x >> 16;
    + x |= x >> 32;
    + return ia64_popcnt(x) - 1;
    +}
    +
    #include

    /*
    diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
    index ec75ce4..c2bd126 100644
    --- a/include/asm-mips/bitops.h
    +++ b/include/asm-mips/bitops.h
    @@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x)
    return 63 - lz;
    }

    +static inline unsigned long __fls(unsigned long x)
    +{
    + return __ilog2(x);
    +}
    +
    #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

    /*
    diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
    index f8eebcb..7a6ea10 100644
    --- a/include/asm-parisc/bitops.h
    +++ b/include/asm-parisc/bitops.h
    @@ -210,6 +210,7 @@ static __inline__ int fls(int x)
    return ret;
    }

    +#include
    #include
    #include
    #include
    diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
    index a99a749..897eade 100644
    --- a/include/asm-powerpc/bitops.h
    +++ b/include/asm-powerpc/bitops.h
    @@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
    return 32 - lz;
    }

    +static __inline__ unsigned long __fls(unsigned long x)
    +{
    + return __ilog2(x);
    +}
    +
    /*
    * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
    * instruction; for 32-bit we use the generic version, which does two
    diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
    index 965394e..b4eb24a 100644
    --- a/include/asm-s390/bitops.h
    +++ b/include/asm-s390/bitops.h
    @@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
    }

    #include
    +#include
    #include

    #include
    diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
    index b6ba5a6..d7d382f 100644
    --- a/include/asm-sh/bitops.h
    +++ b/include/asm-sh/bitops.h
    @@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word)
    #include
    #include
    #include
    +#include
    #include

    #endif /* __KERNEL__ */
    diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
    index 982ce89..11f9d81 100644
    --- a/include/asm-sparc64/bitops.h
    +++ b/include/asm-sparc64/bitops.h
    @@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
    #include
    #include
    #include
    +#include
    #include

    #ifdef __KERNEL__
    diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
    index 1ae7b27..b81a4d4 100644
    --- a/include/asm-x86/bitops.h
    +++ b/include/asm-x86/bitops.h
    @@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
    */
    static inline void __set_bit(int nr, volatile void *addr)
    {
    - asm volatile("bts %1,%0"
    - : ADDR
    - : "Ir" (nr) : "memory");
    + asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
    }

    -
    /**
    * clear_bit - Clears a bit in memory
    * @nr: Bit to clear
    @@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
    static int test_bit(int nr, const volatile unsigned long *addr);
    #endif

    -#define test_bit(nr,addr) \
    - (__builtin_constant_p(nr) ? \
    - constant_test_bit((nr),(addr)) : \
    - variable_test_bit((nr),(addr)))
    +#define test_bit(nr, addr) \
    + (__builtin_constant_p((nr)) \
    + ? constant_test_bit((nr), (addr)) \
    + : variable_test_bit((nr), (addr)))
    +
    +/**
    + * __ffs - find first set bit in word
    + * @word: The word to search
    + *
    + * Undefined if no bit exists, so code should check against 0 first.
    + */
    +static inline unsigned long __ffs(unsigned long word)
    +{
    + asm("bsf %1,%0"
    + : "=r" (word)
    + : "rm" (word));
    + return word;
    +}
    +
    +/**
    + * ffz - find first zero bit in word
    + * @word: The word to search
    + *
    + * Undefined if no zero exists, so code should check against ~0UL first.
    + */
    +static inline unsigned long ffz(unsigned long word)
    +{
    + asm("bsf %1,%0"
    + : "=r" (word)
    + : "r" (~word));
    + return word;
    +}
    +
    +/*
    + * __fls: find last set bit in word
    + * @word: The word to search
    + *
    + * Undefined if no zero exists, so code should check against ~0UL first.
    + */
    +static inline unsigned long __fls(unsigned long word)
    +{
    + asm("bsr %1,%0"
    + : "=r" (word)
    + : "rm" (word));
    + return word;
    +}
    +
    +#ifdef __KERNEL__
    +/**
    + * ffs - find first set bit in word
    + * @x: the word to search
    + *
    + * This is defined the same way as the libc and compiler builtin ffs
    + * routines, therefore differs in spirit from the other bitops.
    + *
    + * ffs(value) returns 0 if value is 0 or the position of the first
    + * set bit if value is nonzero. The first (least significant) bit
    + * is at position 1.
    + */
    +static inline int ffs(int x)
    +{
    + int r;
    +#ifdef CONFIG_X86_CMOV
    + asm("bsfl %1,%0\n\t"
    + "cmovzl %2,%0"
    + : "=r" (r) : "rm" (x), "r" (-1));
    +#else
    + asm("bsfl %1,%0\n\t"
    + "jnz 1f\n\t"
    + "movl $-1,%0\n"
    + "1:" : "=r" (r) : "rm" (x));
    +#endif
    + return r + 1;
    +}
    +
    +/**
    + * fls - find last set bit in word
    + * @x: the word to search
    + *
    + * This is defined in a similar way as the libc and compiler builtin
    + * ffs, but returns the position of the most significant set bit.
    + *
    + * fls(value) returns 0 if value is 0 or the position of the last
    + * set bit if value is nonzero. The last (most significant) bit is
    + * at position 32.
    + */
    +static inline int fls(int x)
    +{
    + int r;
    +#ifdef CONFIG_X86_CMOV
    + asm("bsrl %1,%0\n\t"
    + "cmovzl %2,%0"
    + : "=&r" (r) : "rm" (x), "rm" (-1));
    +#else
    + asm("bsrl %1,%0\n\t"
    + "jnz 1f\n\t"
    + "movl $-1,%0\n"
    + "1:" : "=r" (r) : "rm" (x));
    +#endif
    + return r + 1;
    +}
    +#endif /* __KERNEL__ */

    #undef BASE_ADDR
    #undef BIT_ADDR
    #undef ADDR

    -#ifdef CONFIG_X86_32
    -# include "bitops_32.h"
    -#else
    -# include "bitops_64.h"
    -#endif
    +static inline void set_bit_string(unsigned long *bitmap,
    + unsigned long i, int len)
    +{
    + unsigned long end = i + len;
    + while (i < end) {
    + __set_bit(i, bitmap);
    + i++;
    + }
    +}
    +
    +#ifdef __KERNEL__
    +
    +#include
    +
    +#define ARCH_HAS_FAST_MULTIPLIER 1
    +
    +#include
    +
    +#endif /* __KERNEL__ */
    +
    +#include
    +
    +#ifdef __KERNEL__
    +
    +#include
    +
    +#define ext2_set_bit_atomic(lock, nr, addr) \
    + test_and_set_bit((nr), (unsigned long *)(addr))
    +#define ext2_clear_bit_atomic(lock, nr, addr) \
    + test_and_clear_bit((nr), (unsigned long *)(addr))
    +
    +#include

    +#endif /* __KERNEL__ */
    #endif /* _ASM_X86_BITOPS_H */
    diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
    deleted file mode 100644
    index 2513a81..0000000
    --- a/include/asm-x86/bitops_32.h
    +++ /dev/null
    @@ -1,166 +0,0 @@
    -#ifndef _I386_BITOPS_H
    -#define _I386_BITOPS_H
    -
    -/*
    - * Copyright 1992, Linus Torvalds.
    - */
    -
    -/**
    - * find_first_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit number of the first zero bit, not the number of the byte
    - * containing a bit.
    - */
    -static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
    -{
    - int d0, d1, d2;
    - int res;
    -
    - if (!size)
    - return 0;
    - /* This looks at memory.
    - * Mark it volatile to tell gcc not to move it around
    - */
    - asm volatile("movl $-1,%%eax\n\t"
    - "xorl %%edx,%%edx\n\t"
    - "repe; scasl\n\t"
    - "je 1f\n\t"
    - "xorl -4(%%edi),%%eax\n\t"
    - "subl $4,%%edi\n\t"
    - "bsfl %%eax,%%edx\n"
    - "1:\tsubl %%ebx,%%edi\n\t"
    - "shll $3,%%edi\n\t"
    - "addl %%edi,%%edx"
    - : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
    - : "1" ((size + 31) >> 5), "2" (addr),
    - "b" (addr) : "memory");
    - return res;
    -}
    -
    -/**
    - * find_next_zero_bit - find the first zero bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bit number to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_zero_bit(const unsigned long *addr, int size, int offset);
    -
    -/**
    - * __ffs - find first bit in word.
    - * @word: The word to search
    - *
    - * Undefined if no bit exists, so code should check against 0 first.
    - */
    -static inline unsigned long __ffs(unsigned long word)
    -{
    - __asm__("bsfl %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -/**
    - * find_first_bit - find the first set bit in a memory region
    - * @addr: The address to start the search at
    - * @size: The maximum size to search
    - *
    - * Returns the bit number of the first set bit, not the number of the byte
    - * containing a bit.
    - */
    -static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
    -{
    - unsigned x = 0;
    -
    - while (x < size) {
    - unsigned long val = *addr++;
    - if (val)
    - return __ffs(val) + x;
    - x += sizeof(*addr) << 3;
    - }
    - return x;
    -}
    -
    -/**
    - * find_next_bit - find the first set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bit number to start searching at
    - * @size: The maximum size to search
    - */
    -int find_next_bit(const unsigned long *addr, int size, int offset);
    -
    -/**
    - * ffz - find first zero in word.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long ffz(unsigned long word)
    -{
    - __asm__("bsfl %1,%0"
    - :"=r" (word)
    - :"r" (~word));
    - return word;
    -}
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -/**
    - * ffs - find first bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as
    - * the libc and compiler builtin ffs routines, therefore
    - * differs in spirit from the above ffz() (man ffs).
    - */
    -static inline int ffs(int x)
    -{
    - int r;
    -
    - __asm__("bsfl %1,%0\n\t"
    - "jnz 1f\n\t"
    - "movl $-1,%0\n"
    - "1:" : "=r" (r) : "rm" (x));
    - return r+1;
    -}
    -
    -/**
    - * fls - find last bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as ffs().
    - */
    -static inline int fls(int x)
    -{
    - int r;
    -
    - __asm__("bsrl %1,%0\n\t"
    - "jnz 1f\n\t"
    - "movl $-1,%0\n"
    - "1:" : "=r" (r) : "rm" (x));
    - return r+1;
    -}
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#include
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -#define ext2_set_bit_atomic(lock, nr, addr) \
    - test_and_set_bit((nr), (unsigned long *)(addr))
    -#define ext2_clear_bit_atomic(lock, nr, addr) \
    - test_and_clear_bit((nr), (unsigned long *)(addr))
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#endif /* _I386_BITOPS_H */
    diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
    deleted file mode 100644
    index 365f820..0000000
    --- a/include/asm-x86/bitops_64.h
    +++ /dev/null
    @@ -1,162 +0,0 @@
    -#ifndef _X86_64_BITOPS_H
    -#define _X86_64_BITOPS_H
    -
    -/*
    - * Copyright 1992, Linus Torvalds.
    - */
    -
    -extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
    -extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
    -extern long find_first_bit(const unsigned long *addr, unsigned long size);
    -extern long find_next_bit(const unsigned long *addr, long size, long offset);
    -
    -/* return index of first bet set in val or max when no bit is set */
    -static inline long __scanbit(unsigned long val, unsigned long max)
    -{
    - asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
    - return val;
    -}
    -
    -#define find_next_bit(addr,size,off) \
    -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
    - ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
    - find_next_bit(addr,size,off)))
    -
    -#define find_next_zero_bit(addr,size,off) \
    -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
    - ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
    - find_next_zero_bit(addr,size,off)))
    -
    -#define find_first_bit(addr, size) \
    - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
    - ? (__scanbit(*(unsigned long *)(addr), (size))) \
    - : find_first_bit((addr), (size))))
    -
    -#define find_first_zero_bit(addr, size) \
    - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
    - ? (__scanbit(~*(unsigned long *)(addr), (size))) \
    - : find_first_zero_bit((addr), (size))))
    -
    -static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
    - int len)
    -{
    - unsigned long end = i + len;
    - while (i < end) {
    - __set_bit(i, bitmap);
    - i++;
    - }
    -}
    -
    -/**
    - * ffz - find first zero in word.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long ffz(unsigned long word)
    -{
    - __asm__("bsfq %1,%0"
    - :"=r" (word)
    - :"r" (~word));
    - return word;
    -}
    -
    -/**
    - * __ffs - find first bit in word.
    - * @word: The word to search
    - *
    - * Undefined if no bit exists, so code should check against 0 first.
    - */
    -static inline unsigned long __ffs(unsigned long word)
    -{
    - __asm__("bsfq %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -/*
    - * __fls: find last bit set.
    - * @word: The word to search
    - *
    - * Undefined if no zero exists, so code should check against ~0UL first.
    - */
    -static inline unsigned long __fls(unsigned long word)
    -{
    - __asm__("bsrq %1,%0"
    - :"=r" (word)
    - :"rm" (word));
    - return word;
    -}
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -/**
    - * ffs - find first bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as
    - * the libc and compiler builtin ffs routines, therefore
    - * differs in spirit from the above ffz (man ffs).
    - */
    -static inline int ffs(int x)
    -{
    - int r;
    -
    - __asm__("bsfl %1,%0\n\t"
    - "cmovzl %2,%0"
    - : "=r" (r) : "rm" (x), "r" (-1));
    - return r+1;
    -}
    -
    -/**
    - * fls64 - find last bit set in 64 bit word
    - * @x: the word to search
    - *
    - * This is defined the same way as fls.
    - */
    -static inline int fls64(__u64 x)
    -{
    - if (x == 0)
    - return 0;
    - return __fls(x) + 1;
    -}
    -
    -/**
    - * fls - find last bit set
    - * @x: the word to search
    - *
    - * This is defined the same way as ffs.
    - */
    -static inline int fls(int x)
    -{
    - int r;
    -
    - __asm__("bsrl %1,%0\n\t"
    - "cmovzl %2,%0"
    - : "=&r" (r) : "rm" (x), "rm" (-1));
    - return r+1;
    -}
    -
    -#define ARCH_HAS_FAST_MULTIPLIER 1
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#ifdef __KERNEL__
    -
    -#include
    -
    -#define ext2_set_bit_atomic(lock, nr, addr) \
    - test_and_set_bit((nr), (unsigned long *)(addr))
    -#define ext2_clear_bit_atomic(lock, nr, addr) \
    - test_and_clear_bit((nr), (unsigned long *)(addr))
    -
    -#include
    -
    -#endif /* __KERNEL__ */
    -
    -#endif /* _X86_64_BITOPS_H */
    diff --git a/include/linux/bitops.h b/include/linux/bitops.h
    index 40d5473..48bde60 100644
    --- a/include/linux/bitops.h
    +++ b/include/linux/bitops.h
    @@ -112,4 +112,144 @@ static inline unsigned fls_long(unsigned long l)
    return fls64(l);
    }

    +#ifdef __KERNEL__
    +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
    +extern unsigned long __find_first_bit(const unsigned long *addr,
    + unsigned long size);
    +
    +/**
    + * find_first_bit - find the first set bit in a memory region
    + * @addr: The address to start the search at
    + * @size: The maximum size to search
    + *
    + * Returns the bit number of the first set bit.
    + */
    +static __always_inline unsigned long
    +find_first_bit(const unsigned long *addr, unsigned long size)
    +{
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG))
    + return __ffs((*addr) | (1ul << size));
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
    + return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr);
    +
    + /* size is not constant or too big */
    + return __find_first_bit(addr, size);
    +}
    +
    +extern unsigned long __find_first_zero_bit(const unsigned long *addr,
    + unsigned long size);
    +
    +/**
    + * find_first_zero_bit - find the first cleared bit in a memory region
    + * @addr: The address to start the search at
    + * @size: The maximum size to search
    + *
    + * Returns the bit number of the first cleared bit.
    + */
    +static __always_inline unsigned long
    +find_first_zero_bit(const unsigned long *addr, unsigned long size)
    +{
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + return __ffs(~(*addr) | (1ul << size));
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG))
    + return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr));
    +
    + /* size is not constant or too big */
    + return __find_first_zero_bit(addr, size);
    +}
    +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
    +
    +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
    +extern unsigned long __find_next_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset);
    +
    +/**
    + * find_next_bit - find the next set bit in a memory region
    + * @addr: The address to base the search on
    + * @offset: The bitnumber to start searching at
    + * @size: The bitmap size in bits
    + */
    +static __always_inline unsigned long
    +find_next_bit(const unsigned long *addr, unsigned long size,
    + unsigned long offset)
    +{
    + unsigned long value;
    +
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + value = (*addr) & ((~0ul) << offset);
    + value |= (1ul << size);
    + return __ffs(value);
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
    + value = (*addr) & ((~0ul) << offset);
    + return (value == 0) ? BITS_PER_LONG : __ffs(value);
    + }
    +
    + /* size is not constant or too big */
    + return __find_next_bit(addr, size, offset);
    +}
    +
    +extern unsigned long __find_next_zero_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset);
    +
    +/**
    + * find_next_zero_bit - find the next cleared bit in a memory region
    + * @addr: The address to base the search on
    + * @offset: The bitnumber to start searching at
    + * @size: The bitmap size in bits
    + */
    +static __always_inline unsigned long
    +find_next_zero_bit(const unsigned long *addr, unsigned long size,
    + unsigned long offset)
    +{
    + unsigned long value;
    +
    + /* Avoid a function call if the bitmap size is a constant */
    + /* and not bigger than BITS_PER_LONG. */
    +
    + /* insert a sentinel so that __ffs returns size if there */
    + /* are no set bits in the bitmap */
    + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) {
    + value = (~(*addr)) & ((~0ul) << offset);
    + value |= (1ul << size);
    + return __ffs(value);
    + }
    +
    + /* the result of __ffs(0) is undefined, so it needs to be */
    + /* handled separately */
    + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) {
    + value = (~(*addr)) & ((~0ul) << offset);
    + return (value == 0) ? BITS_PER_LONG : __ffs(value);
    + }
    +
    + /* size is not constant or too big */
    + return __find_next_zero_bit(addr, size, offset);
    +}
    +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
    +#endif /* __KERNEL__ */
    #endif
    diff --git a/lib/Kconfig b/lib/Kconfig
    index 2d53dc0..8cc8e87 100644
    --- a/lib/Kconfig
    +++ b/lib/Kconfig
    @@ -7,6 +7,12 @@ menu "Library routines"
    config BITREVERSE
    tristate

    +config GENERIC_FIND_FIRST_BIT
    + def_bool n
    +
    +config GENERIC_FIND_NEXT_BIT
    + def_bool n
    +
    config CRC_CCITT
    tristate "CRC-CCITT functions"
    help
    diff --git a/lib/Makefile b/lib/Makefile
    index bf8000f..2d7001b 100644
    --- a/lib/Makefile
    +++ b/lib/Makefile
    @@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
    obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
    lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
    lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
    +lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
    lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
    obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
    obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
    diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
    index 78ccd73..d3f5784 100644
    --- a/lib/find_next_bit.c
    +++ b/lib/find_next_bit.c
    @@ -16,14 +16,12 @@

    #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)

    -/**
    - * find_next_bit - find the next set bit in a memory region
    - * @addr: The address to base the search on
    - * @offset: The bitnumber to start searching at
    - * @size: The maximum size to search
    +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
    +/*
    + * Find the next set bit in a memory region.
    */
    -unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
    - unsigned long offset)
    +unsigned long __find_next_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset)
    {
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    @@ -60,15 +58,14 @@ found_first:
    found_middle:
    return result + __ffs(tmp);
    }
    -
    -EXPORT_SYMBOL(find_next_bit);
    +EXPORT_SYMBOL(__find_next_bit);

    /*
    * This implementation of find_{first,next}_zero_bit was stolen from
    * Linus' asm-alpha/bitops.h.
    */
    -unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
    - unsigned long offset)
    +unsigned long __find_next_zero_bit(const unsigned long *addr,
    + unsigned long size, unsigned long offset)
    {
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    @@ -105,8 +102,64 @@ found_first:
    found_middle:
    return result + ffz(tmp);
    }
    +EXPORT_SYMBOL(__find_next_zero_bit);
    +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
    +
    +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
    +/*
    + * Find the first set bit in a memory region.
    + */
    +unsigned long __find_first_bit(const unsigned long *addr,
    + unsigned long size)
    +{
    + const unsigned long *p = addr;
    + unsigned long result = 0;
    + unsigned long tmp;

    -EXPORT_SYMBOL(find_next_zero_bit);
    + while (size & ~(BITS_PER_LONG-1)) {
    + if ((tmp = *(p++)))
    + goto found;
    + result += BITS_PER_LONG;
    + size -= BITS_PER_LONG;
    + }
    + if (!size)
    + return result;
    +
    + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
    + if (tmp == 0UL) /* Are any bits set? */
    + return result + size; /* Nope. */
    +found:
    + return result + __ffs(tmp);
    +}
    +EXPORT_SYMBOL(__find_first_bit);
    +
    +/*
    + * Find the first cleared bit in a memory region.
    + */
    +unsigned long __find_first_zero_bit(const unsigned long *addr,
    + unsigned long size)
    +{
    + const unsigned long *p = addr;
    + unsigned long result = 0;
    + unsigned long tmp;
    +
    + while (size & ~(BITS_PER_LONG-1)) {
    + if (~(tmp = *(p++)))
    + goto found;
    + result += BITS_PER_LONG;
    + size -= BITS_PER_LONG;
    + }
    + if (!size)
    + return result;
    +
    + tmp = (*p) | (~0UL << size);
    + if (tmp == ~0UL) /* Are any bits zero? */
    + return result + size; /* Nope. */
    +found:
    + return result + ffz(tmp);
    +}
    +EXPORT_SYMBOL(__find_first_zero_bit);
    +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

    #ifdef __BIG_ENDIAN

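Two details in the patch above are easy to miss when skimming the diff.

First, the new include/asm-generic/bitops/__fls.h finds the most significant set bit by testing the upper half of the remaining window and, when it is empty, shifting the word up and lowering the candidate position, in effect a branch-only binary search. A minimal user-space sketch of the same logic (the demo_fls name, the main() harness and the test values are illustrative only, and a 64-bit long is assumed):

    /* Stand-alone sketch of the generic __fls() introduced above.
     * Illustrative harness; assumes BITS_PER_LONG == 64. */
    #include <assert.h>
    #include <stdio.h>

    #define BITS_PER_LONG 64

    static unsigned long demo_fls(unsigned long word)
    {
            int num = BITS_PER_LONG - 1;

            /* If the top half of the remaining window is empty, the
             * answer lies in the bottom half: shift it up and lower the
             * candidate position by 32, 16, 8, 4, 2, 1. */
            if (!(word & (~0ul << 32))) {
                    num -= 32;
                    word <<= 32;
            }
            if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
                    num -= 16;
                    word <<= 16;
            }
            if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
                    num -= 8;
                    word <<= 8;
            }
            if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
                    num -= 4;
                    word <<= 4;
            }
            if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
                    num -= 2;
                    word <<= 2;
            }
            if (!(word & (~0ul << (BITS_PER_LONG - 1))))
                    num -= 1;
            return num;
    }

    int main(void)
    {
            assert(demo_fls(1) == 0);               /* only bit 0 set */
            assert(demo_fls(9) == 3);               /* matches the ia64 comment */
            assert(demo_fls(1ul << 63) == 63);      /* top bit */
            printf("__fls sketch ok\n");
            return 0;
    }

As in the kernel version, the result is undefined for an input of 0, which is why fls64() checks for zero before calling __fls().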
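Second, the inlined find_first_bit()/find_next_bit() variants added to linux/bitops.h avoid the "__ffs(0) is undefined" corner case for small, compile-time-constant bitmaps by OR-ing a sentinel bit into position 'size': if nothing below 'size' is set, the scan lands on the sentinel and the conventional "not found" value falls out for free. A hedged sketch of just that trick (names and harness are illustrative; __builtin_ctzl stands in here for the architecture's __ffs):

    /* Sentinel trick used by the constant-size fast path above.
     * Only valid for size < BITS_PER_LONG, exactly as in the patch. */
    #include <assert.h>

    static unsigned long sketch_find_first_bit(unsigned long word,
                                                unsigned long size)
    {
            /* OR in a bit at position 'size' so the scan never sees zero;
             * "no bit set below size" then comes back as 'size' itself. */
            return __builtin_ctzl(word | (1ul << size));
    }

    int main(void)
    {
            assert(sketch_find_first_bit(0x0, 16) == 16);      /* empty -> size */
            assert(sketch_find_first_bit(0x8, 16) == 3);       /* bit 3 found */
            assert(sketch_find_first_bit(0x10000, 16) == 16);  /* bit 16 is outside the map */
            return 0;
    }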

  12. Re: [git pull] generic bitops, take 2

    > then it fails with:
    >
    > mm/filemap.c: In function '__generic_file_aio_write_nolock':
    > mm/filemap.c:1831: sorry, unimplemented: inlining failed in call to 'generic_write_checks': function body not available
    >
    > i used:
    >
    > make ARCH=um SUBARCH=x86_64 -j64 linux
    >
    > on x86-64, gcc-4.3.0-0.13. Config is:
    >
    > http://redhat.com/~mingo/misc/.config.um


    I just hit this bug as well:

    make linux ARCH=um

    on i686, gcc 4.3.0, config at
    http://uml.nagafix.co.uk/kernels/kernel32-2.6.25.config

    Thanks.

    -----BEGIN PGP SIGNATURE-----
    Version: GnuPG v1.4.8 (GNU/Linux)

iEYEARECAAYFAkgTa6kACgkQe81tAgORUJZTcQCeIsS34r5UHgOtZ3dvdO+NDhnu
    fH8An3G0ELWYyDObvmCLySqkUEGf5VE6
    =B5F/
    -----END PGP SIGNATURE-----


  13. [2.6 patch] fs/udf/partition.c:udf_get_pblock() mustn't be inline

    This patch fixes the following build error with UML and gcc 4.3:

    <-- snip -->

    ....
    CC fs/udf/partition.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/udf/partition.c: In function ‘udf_get_pblock_virt15’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/udf/partition.c:32: sorry, unimplemented: inlining failed in call to ‘udf_get_pblock’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/udf/partition.c:102: sorry, unimplemented: called from here
    make[3]: *** [fs/udf/partition.o] Error 1

    <-- snip -->

    Signed-off-by: Adrian Bunk

    ---
    9739b5f94fdf781f71cbb4c7b2f8f6d9e3d58e55 diff --git a/fs/udf/partition.c b/fs/udf/partition.c
    index 63610f0..96dfd20 100644
    --- a/fs/udf/partition.c
    +++ b/fs/udf/partition.c
    @@ -27,8 +27,8 @@
    #include
    #include

    -inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
    - uint16_t partition, uint32_t offset)
    +uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
    + uint16_t partition, uint32_t offset)
    {
    struct udf_sb_info *sbi = UDF_SB(sb);
    struct udf_part_map *map;

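The patches that follow (items 14 through 19) repeat the same shape as this udf fix: a function is defined with a bare `inline` in one .c file while the rest of the tree only sees a plain prototype in a header, and the gcc 4.3 UML build stops with "sorry, unimplemented: inlining failed ... function body not available". In every case the fix is simply to drop the `inline` from the definition and let the compiler decide. A schematic, single-file reproduction of the pattern (the names here are made up, and no claim is made about exactly which gcc flags trigger the error):

    /* demo.c: made-up reproduction of the pattern these patches remove.
     * In the kernel the prototype lives in a header and the definition
     * in a .c file; the shape is the same. */

    /* what the header provides: a plain, non-inline prototype */
    void init_thing(int *thing, int value);

    /* the problematic definition: externally visible, yet marked inline */
    inline void init_thing(int *thing, int value)
    {
            *thing = value;
    }

    /* a later caller in the same translation unit */
    void init_all(int *things, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    init_thing(&things[i], 0);
    }

The fix, as in each patch below, is to write the definition as plain "void init_thing(...)" with no inline keyword.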

  14. [2.6 patch] net/ipv4/ip_output.c:ip_send_check() mustn't be inline

    This patch fixes the following build error with UML and gcc 4.3:

    <-- snip -->

    ....
    CC net/ipv4/ip_output.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/net/ipv4/ip_output.c: In function ‘__ip_local_out’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/net/ipv4/ip_output.c:89: sorry, unimplemented: inlining failed in call to ‘ip_send_check’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/net/ipv4/ip_output.c:99: sorry, unimplemented: called from here
    make[3]: *** [net/ipv4/ip_output.o] Error 1

    <-- snip -->

    Signed-off-by: Adrian Bunk

    ---
    936e490777359bfde622148ff5af1e19d8c248b5 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
    index 0834926..985a719 100644
    --- a/net/ipv4/ip_output.c
    +++ b/net/ipv4/ip_output.c
    @@ -85,7 +85,7 @@
    int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

    /* Generate a checksum for an outgoing IP datagram. */
    -__inline__ void ip_send_check(struct iphdr *iph)
    +void ip_send_check(struct iphdr *iph)
    {
    iph->check = 0;
    iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);


  15. [2.6 patch] reiserfs: some functions mustn't be inline

    This patch fixes the following build errors with UML and gcc 4.3:

    <-- snip -->

    ....
    CC fs/reiserfs/do_balan.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/do_balan.c: In function ‘balance_leaf’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/do_balan.c:34: sorry, unimplemented: inlining failed in call to ‘do_balance_mark_leaf_dirty’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/do_balan.c:1018: sorry, unimplemented: called from here
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/do_balan.c:34: sorry, unimplemented: inlining failed in call to ‘do_balance_mark_leaf_dirty’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/do_balan.c:1174: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/do_balan.o] Error 1
    ....
    CC fs/reiserfs/namei.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/namei.c: In function ‘search_by_entry_key’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/namei.c:66: sorry, unimplemented: inlining failed in call to ‘set_de_name_and_namelen’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/namei.c:162: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/namei.o] Error 1
    ....
    CC fs/reiserfs/inode.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/inode.c: In function ‘reiserfs_get_block’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/inode.c:108: sorry, unimplemented: inlining failed in call to ‘make_le_item_head’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/inode.c:776: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/inode.o] Error 1
    ....
    CC fs/reiserfs/stree.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c: In function ‘comp_keys’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:86: sorry, unimplemented: inlining failed in call to ‘comp_short_keys’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:111: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/stree.o] Error 1
    ....
    CC fs/reiserfs/stree.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c: In function ‘get_lkey’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:61: sorry, unimplemented: inlining failed in call to ‘B_IS_IN_TREE’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:265: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/stree.o] Error 1
    ....
    CC fs/reiserfs/stree.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c: In function ‘key_in_buffer’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:294: sorry, unimplemented: inlining failed in call to ‘get_rkey’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:356: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/stree.o] Error 1
    ....
    CC fs/reiserfs/stree.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c: In function ‘prepare_for_delete_or_cut’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:74: sorry, unimplemented: inlining failed in call to ‘copy_item_head’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:1041: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/stree.o] Error 1
    ....
    CC fs/reiserfs/stree.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c: In function ‘reiserfs_delete_solid_item’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:153: sorry, unimplemented: inlining failed in call to ‘le_key2cpu_key’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:1335: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/stree.o] Error 1
    ....
    CC fs/reiserfs/stree.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c: In function ‘decrement_counters_in_path’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:362: sorry, unimplemented: inlining failed in call to ‘decrement_bcount’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/reiserfs/stree.c:387: sorry, unimplemented: called from here
    make[3]: *** [fs/reiserfs/stree.o] Error 1

    <-- snip -->

    Signed-off-by: Adrian Bunk

    ---

    fs/reiserfs/do_balan.c | 4 ++--
    fs/reiserfs/inode.c | 8 ++++----
    fs/reiserfs/namei.c | 2 +-
    fs/reiserfs/stree.c | 17 ++++++++---------
    4 files changed, 15 insertions(+), 16 deletions(-)

    da1507ac13cfd50db5dddc0186e0ff1f1ae26517 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
    index 7ee4208..42be2a6 100644
    --- a/fs/reiserfs/do_balan.c
    +++ b/fs/reiserfs/do_balan.c
    @@ -29,8 +29,8 @@ struct tree_balance *cur_tb = NULL; /* detects whether more than one
    is interrupting do_balance */
    #endif

    -inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
    - struct buffer_head *bh, int flag)
    +void do_balance_mark_leaf_dirty(struct tree_balance *tb,
    + struct buffer_head *bh, int flag)
    {
    journal_mark_dirty(tb->transaction_handle,
    tb->transaction_handle->t_super, bh);
    diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
    index 5791793..3e06b99 100644
    --- a/fs/reiserfs/inode.c
    +++ b/fs/reiserfs/inode.c
    @@ -101,10 +101,10 @@ void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
    //
    // when key is 0, do not set version and short key
    //
    -inline void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
    - int version,
    - loff_t offset, int type, int length,
    - int entry_count /*or ih_free_space */ )
    +void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
    + int version,
    + loff_t offset, int type, int length,
    + int entry_count /*or ih_free_space */ )
    {
    if (key) {
    ih->ih_key.k_dir_id = cpu_to_le32(key->on_disk_key.k_dir_id);
    diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
    index 8867533..f592296 100644
    --- a/fs/reiserfs/namei.c
    +++ b/fs/reiserfs/namei.c
    @@ -62,7 +62,7 @@ static inline void set_de_item_location(struct reiserfs_dir_entry *de,
    }

    // de_bh, de_ih, de_deh (points to first element of array), de_item_num is set
    -inline void set_de_name_and_namelen(struct reiserfs_dir_entry *de)
    +void set_de_name_and_namelen(struct reiserfs_dir_entry *de)
    {
    struct reiserfs_de_head *deh = de->de_deh + de->de_entry_num;

    diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
    index d2db241..9b84b4c 100644
    --- a/fs/reiserfs/stree.c
    +++ b/fs/reiserfs/stree.c
    @@ -57,7 +57,7 @@
    #include

    /* Does the buffer contain a disk block which is in the tree. */
    -inline int B_IS_IN_TREE(const struct buffer_head *p_s_bh)
    +int B_IS_IN_TREE(const struct buffer_head *p_s_bh)
    {

    RFALSE(B_LEVEL(p_s_bh) > MAX_HEIGHT,
    @@ -69,8 +69,7 @@ inline int B_IS_IN_TREE(const struct buffer_head *p_s_bh)
    //
    // to gets item head in le form
    //
    -inline void copy_item_head(struct item_head *p_v_to,
    - const struct item_head *p_v_from)
    +void copy_item_head(struct item_head *p_v_to, const struct item_head *p_v_from)
    {
    memcpy(p_v_to, p_v_from, IH_SIZE);
    }
    @@ -81,8 +80,8 @@ inline void copy_item_head(struct item_head *p_v_to,
    Returns: -1 if key1 < key2
    0 if key1 == key2
    1 if key1 > key2 */
    -inline int comp_short_keys(const struct reiserfs_key *le_key,
    - const struct cpu_key *cpu_key)
    +int comp_short_keys(const struct reiserfs_key *le_key,
    + const struct cpu_key *cpu_key)
    {
    __u32 n;
    n = le32_to_cpu(le_key->k_dir_id);
    @@ -150,7 +149,7 @@ inline int comp_short_le_keys(const struct reiserfs_key *key1,
    return 0;
    }

    -inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
    +void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
    {
    int version;
    to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
    @@ -289,8 +288,8 @@ static inline const struct reiserfs_key *get_lkey(const struct treepath
    }

    /* Get delimiting key of the buffer at the path and its right neighbor. */
    -inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
    - const struct super_block *p_s_sb)
    +const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
    + const struct super_block *p_s_sb)
    {
    int n_position, n_path_offset = p_s_chk_path->path_length;
    struct buffer_head *p_s_parent;
    @@ -359,7 +358,7 @@ static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which sho
    return 1;
    }

    -inline void decrement_bcount(struct buffer_head *p_s_bh)
    +void decrement_bcount(struct buffer_head *p_s_bh)
    {
    if (p_s_bh) {
    if (atomic_read(&(p_s_bh->b_count))) {


  16. [2.6 patch] fs/buffer.c:init_buffer() mustn't be inline

    This patch fixes the following build error with UML and gcc 4.3:

    <-- snip -->

    ....
    CC fs/buffer.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/buffer.c: In function ‘init_page_buffers’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/buffer.c:51: sorry, unimplemented: inlining failed in call to ‘init_buffer’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/buffer.c:1007: sorry, unimplemented: called from here
    make[2]: *** [fs/buffer.o] Error 1

    <-- snip -->

    Signed-off-by: Adrian Bunk

    ---
    922267dbec9ea632537ab61ef9cb2d0d5f9d8f9e diff --git a/fs/buffer.c b/fs/buffer.c
    index 39ff144..d74743d 100644
    --- a/fs/buffer.c
    +++ b/fs/buffer.c
    @@ -46,8 +46,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

    #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

    -inline void
    -init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
    +void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
    {
    bh->b_end_io = handler;
    bh->b_private = private;


  17. [2.6 patch] mm/filemap.c:generic_write_checks() mustn't be inline

    On Sat, Apr 26, 2008 at 07:22:22PM +0200, Ingo Molnar wrote:
    >...
    > then it fails with:
    >
    > mm/filemap.c: In function '__generic_file_aio_write_nolock':
    > mm/filemap.c:1831: sorry, unimplemented: inlining failed in call to 'generic_write_checks': function body not available
    >
    > i used:
    >
    > make ARCH=um SUBARCH=x86_64 -j64 linux
    >
    > on x86-64, gcc-4.3.0-0.13. Config is:
    >
    > http://redhat.com/~mingo/misc/.config.um
    >...


    Patch below.

    > Ingo
    >...


    cu
    Adrian


    <-- snip -->


    This patch fixes the following build error with UML and gcc 4.3:

    <-- snip -->

    ....
    CC mm/filemap.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/mm/filemap.c: In function ‘__generic_file_aio_write_nolock’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/mm/filemap.c:1831: sorry, unimplemented: inlining failed in call to ‘generic_write_checks’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/mm/filemap.c:2383: sorry, unimplemented: called from here
    make[2]: *** [mm/filemap.o] Error 1

    <-- snip -->

    Reported-by: Ingo Molnar
    Signed-off-by: Adrian Bunk

    ---
    e4a4374513a2b23060bb385d83f1e67427b1b665 diff --git a/mm/filemap.c b/mm/filemap.c
    index 07e9d92..576b553 100644
    --- a/mm/filemap.c
    +++ b/mm/filemap.c
    @@ -1827,7 +1827,8 @@ EXPORT_SYMBOL(iov_iter_single_seg_count);
    * Returns appropriate error code that caller should return or
    * zero in case that write should be allowed.
    */
    -inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
    +int generic_write_checks(struct file *file, loff_t *pos, size_t *count,
    + int isblk)
    {
    struct inode *inode = file->f_mapping->host;
    unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

  18. [2.6 patch] fs/block_dev.c:I_BDEV() mustn't be inline

    This patch fixes the following build error with UML and gcc 4.3:

    <-- snip -->

    ....
    CC fs/block_dev.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/block_dev.c: In function ‘blkdev_get_block’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/block_dev.c:42: sorry, unimplemented: inlining failed in call to ‘I_BDEV’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/block_dev.c:119: sorry, unimplemented: called from here
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/block_dev.c:42: sorry, unimplemented: inlining failed in call to ‘I_BDEV’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/block_dev.c:131: sorry, unimplemented: called from here
    make[2]: *** [fs/block_dev.o] Error 1

    <-- snip -->

    Signed-off-by: Adrian Bunk

    ---
    cd324c3df8a964c6abe03828861238ea3083ba02 diff --git a/fs/block_dev.c b/fs/block_dev.c
    index 7d822fa..ce9ea3e 100644
    --- a/fs/block_dev.c
    +++ b/fs/block_dev.c
    @@ -38,7 +38,7 @@ static inline struct bdev_inode *BDEV_I(struct inode *inode)
    return container_of(inode, struct bdev_inode, vfs_inode);
    }

    -inline struct block_device *I_BDEV(struct inode *inode)
    +struct block_device *I_BDEV(struct inode *inode)
    {
    return &BDEV_I(inode)->bdev;
    }


  19. [2.6 patch] block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline

    This patch fixes the following build error with UML and gcc 4.3:

    <-- snip -->

    ....
    CC block/blk-barrier.o
    /home/bunk/linux/kernel-2.6/git/linux-2.6/block/blk-barrier.c: In function ‘blk_do_ordered’:
    /home/bunk/linux/kernel-2.6/git/linux-2.6/block/blk-barrier.c:57: sorry, unimplemented: inlining failed in call to ‘blk_ordered_cur_seq’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/block/blk-barrier.c:252: sorry, unimplemented: called from here
    /home/bunk/linux/kernel-2.6/git/linux-2.6/block/blk-barrier.c:57: sorry, unimplemented: inlining failed in call to ‘blk_ordered_cur_seq’: function body not available
    /home/bunk/linux/kernel-2.6/git/linux-2.6/block/blk-barrier.c:253: sorry, unimplemented: called from here
    make[2]: *** [block/blk-barrier.o] Error 1

    <-- snip -->

    Signed-off-by: Adrian Bunk

    ---
    e322a237766808e2c198f4521a9c8ba1fae6b77c diff --git a/block/blk-barrier.c b/block/blk-barrier.c
    index 55c5f1f..2b9f2e9 100644
    --- a/block/blk-barrier.c
    +++ b/block/blk-barrier.c
    @@ -53,7 +53,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
    /*
    * Cache flushing for ordered writes handling
    */
    -inline unsigned blk_ordered_cur_seq(struct request_queue *q)
    +unsigned blk_ordered_cur_seq(struct request_queue *q)
    {
    if (!q->ordseq)
    return 0;


  20. Re: [2.6 patch] fs/buffer.c:init_buffer() mustn't be inline



    On Mon, 28 Apr 2008, Adrian Bunk wrote:
>
    > This patch fixes the following build error with UML and gcc 4.3:
    >
    > <-- snip -->
    >
    > ...
    > CC fs/buffer.o
    > /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/buffer.c: In function ‘init_page_buffers’:
    > /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/buffer.c:51: sorry, unimplemented: inlining failed in call to ‘init_buffer’: function body not available
    > /home/bunk/linux/kernel-2.6/git/linux-2.6/fs/buffer.c:1007: sorry, unimplemented: called from here
    > make[2]: *** [fs/buffer.o] Error 1


    Can somebody tell why this is not a gcc bug?

    Linus
