[PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only


  1. [PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only

    Cleanup to standardize formatting of .h files

    Checkpatch now reports "mostly" clean: the remaining errors and
    warnings are ones checkpatch emits inappropriately for asm files.

    Ingo's code-quality script totals for include/asm-x86:

                Errors    LOC
    Before:       1457  31320
    After:         252  31729

    Changes:

    s/__asm__/asm/g
    s/__volatile__/volatile/g
    s/__inline__/inline/g
    80 columns
    no spaces before casts
    do {} while macro formatting
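
    For illustration, a hypothetical header fragment (not taken from the
    series itself) showing the same rules applied:

    /* before */
    static __inline__ int foo(int x)
    {
            __asm__ __volatile__("incl %0" : "+r" (x));
            return (int) x;
    }
    #define BAR(x) do { foo(x); } while(0)

    /* after */
    static inline int foo(int x)
    {
            asm volatile("incl %0" : "+r" (x));
            return (int)x;
    }
    #define BAR(x)          \
    do {                    \
            foo(x);         \
    } while (0)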

    objdump -D shows no code changes for x86 defconfig and allyesconfig.

    There are __LINE__, __DATE__, and __TIME__ changes
    in the objects, but no code changes as far as I can tell.



  2. [PATCH 015/148] include/asm-x86/checksum_64.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/checksum_64.h | 118 ++++++++++++++++++++---------------------
    1 files changed, 57 insertions(+), 61 deletions(-)

    diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
    index e5f7999..8bd861c 100644
    --- a/include/asm-x86/checksum_64.h
    +++ b/include/asm-x86/checksum_64.h
    @@ -1,33 +1,31 @@
    #ifndef _X86_64_CHECKSUM_H
    #define _X86_64_CHECKSUM_H

    -/*
    - * Checksums for x86-64
    - * Copyright 2002 by Andi Kleen, SuSE Labs
    +/*
    + * Checksums for x86-64
    + * Copyright 2002 by Andi Kleen, SuSE Labs
    * with some code from asm-x86/checksum.h
    - */
    + */

    #include
    #include
    #include

    -/**
    +/**
    * csum_fold - Fold and invert a 32bit checksum.
    * sum: 32bit unfolded sum
    - *
    + *
    * Fold a 32bit running checksum to 16bit and invert it. This is usually
    * the last step before putting a checksum into a packet.
    * Make sure not to mix with 64bit checksums.
    */
    static inline __sum16 csum_fold(__wsum sum)
    {
    - __asm__(
    - " addl %1,%0\n"
    - " adcl $0xffff,%0"
    - : "=r" (sum)
    - : "r" ((__force u32)sum << 16),
    - "0" ((__force u32)sum & 0xffff0000)
    - );
    + asm(" addl %1,%0\n"
    + " adcl $0xffff,%0"
    + : "=r" (sum)
    + : "r" ((__force u32)sum << 16),
    + "0" ((__force u32)sum & 0xffff0000));
    return (__force __sum16)(~(__force u32)sum >> 16);
    }

    @@ -43,46 +41,46 @@ static inline __sum16 csum_fold(__wsum sum)
    * ip_fast_csum - Compute the IPv4 header checksum efficiently.
    * iph: ipv4 header
    * ihl: length of header / 4
    - */
    + */
    static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
    {
    unsigned int sum;

    - asm( " movl (%1), %0\n"
    - " subl $4, %2\n"
    - " jbe 2f\n"
    - " addl 4(%1), %0\n"
    - " adcl 8(%1), %0\n"
    - " adcl 12(%1), %0\n"
    - "1: adcl 16(%1), %0\n"
    - " lea 4(%1), %1\n"
    - " decl %2\n"
    - " jne 1b\n"
    - " adcl $0, %0\n"
    - " movl %0, %2\n"
    - " shrl $16, %0\n"
    - " addw %w2, %w0\n"
    - " adcl $0, %0\n"
    - " notl %0\n"
    - "2:"
    + asm(" movl (%1), %0\n"
    + " subl $4, %2\n"
    + " jbe 2f\n"
    + " addl 4(%1), %0\n"
    + " adcl 8(%1), %0\n"
    + " adcl 12(%1), %0\n"
    + "1: adcl 16(%1), %0\n"
    + " lea 4(%1), %1\n"
    + " decl %2\n"
    + " jne 1b\n"
    + " adcl $0, %0\n"
    + " movl %0, %2\n"
    + " shrl $16, %0\n"
    + " addw %w2, %w0\n"
    + " adcl $0, %0\n"
    + " notl %0\n"
    + "2:"
    /* Since the input registers which are loaded with iph and ihl
    are modified, we must also specify them as outputs, or gcc
    will assume they contain their original values. */
    - : "=r" (sum), "=r" (iph), "=r" (ihl)
    - : "1" (iph), "2" (ihl)
    - : "memory");
    + : "=r" (sum), "=r" (iph), "=r" (ihl)
    + : "1" (iph), "2" (ihl)
    + : "memory");
    return (__force __sum16)sum;
    }

    -/**
    +/**
    * csum_tcpup_nofold - Compute an IPv4 pseudo header checksum.
    * @saddr: source address
    * @daddr: destination address
    * @len: length of packet
    * @proto: ip protocol of packet
    - * @sum: initial sum to be added in (32bit unfolded)
    - *
    - * Returns the pseudo header checksum the input data. Result is
    + * @sum: initial sum to be added in (32bit unfolded)
    + *
    + * Returns the pseudo header checksum the input data. Result is
    * 32bit unfolded.
    */
    static inline __wsum
    @@ -93,32 +91,32 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
    " adcl %2, %0\n"
    " adcl %3, %0\n"
    " adcl $0, %0\n"
    - : "=r" (sum)
    + : "=r" (sum)
    : "g" (daddr), "g" (saddr),
    "g" ((len + proto)<<8), "0" (sum));
    - return sum;
    + return sum;
    }


    -/**
    +/**
    * csum_tcpup_magic - Compute an IPv4 pseudo header checksum.
    * @saddr: source address
    * @daddr: destination address
    * @len: length of packet
    * @proto: ip protocol of packet
    - * @sum: initial sum to be added in (32bit unfolded)
    - *
    + * @sum: initial sum to be added in (32bit unfolded)
    + *
    * Returns the 16bit pseudo header checksum the input data already
    * complemented and ready to be filled in.
    */
    -static inline __sum16
    -csum_tcpudp_magic(__be32 saddr, __be32 daddr,
    - unsigned short len, unsigned short proto, __wsum sum)
    +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
    + unsigned short len,
    + unsigned short proto, __wsum sum)
    {
    - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto ,sum));
    + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
    }

    -/**
    +/**
    * csum_partial - Compute an internet checksum.
    * @buff: buffer to be checksummed
    * @len: length of buffer.
    @@ -127,7 +125,7 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr,
    * Returns the 32bit unfolded internet checksum of the buffer.
    * Before filling it in it needs to be csum_fold()'ed.
    * buff should be aligned to a 64bit boundary if possible.
    - */
    + */
    extern __wsum csum_partial(const void *buff, int len, __wsum sum);

    #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
    @@ -136,23 +134,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);

    /* Do not call this directly. Use the wrappers below */
    extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
    - int len,
    - __wsum sum,
    - int *src_err_ptr, int *dst_err_ptr);
    + int len, __wsum sum,
    + int *src_err_ptr, int *dst_err_ptr);


    extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
    - int len, __wsum isum, int *errp);
    + int len, __wsum isum, int *errp);
    extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
    - int len, __wsum isum, int *errp);
    -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
    - __wsum sum);
    + int len, __wsum isum, int *errp);
    +extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
    + int len, __wsum sum);

    /* Old names. To be removed. */
    #define csum_and_copy_to_user csum_partial_copy_to_user
    #define csum_and_copy_from_user csum_partial_copy_from_user

    -/**
    +/**
    * ip_compute_csum - Compute an 16bit IP checksum.
    * @buff: buffer address.
    * @len: length of buffer.
    @@ -170,7 +167,7 @@ extern __sum16 ip_compute_csum(const void *buff, int len);
    * @proto: protocol of packet
    * @sum: initial sum (32bit unfolded) to be added in
    *
    - * Computes an IPv6 pseudo header checksum. This sum is added the checksum
    + * Computes an IPv6 pseudo header checksum. This sum is added the checksum
    * into UDP/TCP packets and contains some link layer information.
    * Returns the unfolded 32bit checksum.
    */
    @@ -185,11 +182,10 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
    static inline unsigned add32_with_carry(unsigned a, unsigned b)
    {
    asm("addl %2,%0\n\t"
    - "adcl $0,%0"
    - : "=r" (a)
    + "adcl $0,%0"
    + : "=r" (a)
    : "0" (a), "r" (b));
    return a;
    }

    #endif
    -
    --
    1.5.4.rc2
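
    For readers unfamiliar with the arithmetic behind csum_fold() above, a
    rough portable-C equivalent of the fold-and-invert step (illustrative
    sketch only, not kernel code):

    static unsigned short csum_fold_c(unsigned int sum)
    {
            /* fold the high 16 bits into the low 16 bits; do it twice
             * so any carry from the first add is absorbed, then take
             * the ones-complement */
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (unsigned short)~sum;
    }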


  3. [PATCH 014/148] include/asm-x86/checksum_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/checksum_32.h | 152 ++++++++++++++++++++---------------------
    1 files changed, 75 insertions(+), 77 deletions(-)

    diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
    index 75194ab..52bbb0d 100644
    --- a/include/asm-x86/checksum_32.h
    +++ b/include/asm-x86/checksum_32.h
    @@ -28,7 +28,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
    */

    asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
    - int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
    + int len, __wsum sum,
    + int *src_err_ptr, int *dst_err_ptr);

    /*
    * Note: when you get a NULL pointer exception here this means someone
    @@ -37,20 +38,20 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
    * If you use these functions directly please don't forget the
    * access_ok().
    */
    -static __inline__
    -__wsum csum_partial_copy_nocheck (const void *src, void *dst,
    - int len, __wsum sum)
    +static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
    + int len, __wsum sum)
    {
    - return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
    + return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
    }

    -static __inline__
    -__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
    - int len, __wsum sum, int *err_ptr)
    +static inline __wsum csum_partial_copy_from_user(const void __user *src,
    + void *dst,
    + int len, __wsum sum,
    + int *err_ptr)
    {
    might_sleep();
    return csum_partial_copy_generic((__force void *)src, dst,
    - len, sum, err_ptr, NULL);
    + len, sum, err_ptr, NULL);
    }

    /*
    @@ -64,30 +65,29 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
    {
    unsigned int sum;

    - __asm__ __volatile__(
    - "movl (%1), %0 ;\n"
    - "subl $4, %2 ;\n"
    - "jbe 2f ;\n"
    - "addl 4(%1), %0 ;\n"
    - "adcl 8(%1), %0 ;\n"
    - "adcl 12(%1), %0 ;\n"
    -"1: adcl 16(%1), %0 ;\n"
    - "lea 4(%1), %1 ;\n"
    - "decl %2 ;\n"
    - "jne 1b ;\n"
    - "adcl $0, %0 ;\n"
    - "movl %0, %2 ;\n"
    - "shrl $16, %0 ;\n"
    - "addw %w2, %w0 ;\n"
    - "adcl $0, %0 ;\n"
    - "notl %0 ;\n"
    -"2: ;\n"
    + asm volatile("movl (%1), %0 ;\n"
    + "subl $4, %2 ;\n"
    + "jbe 2f ;\n"
    + "addl 4(%1), %0 ;\n"
    + "adcl 8(%1), %0 ;\n"
    + "adcl 12(%1), %0;\n"
    + "1: adcl 16(%1), %0 ;\n"
    + "lea 4(%1), %1 ;\n"
    + "decl %2 ;\n"
    + "jne 1b ;\n"
    + "adcl $0, %0 ;\n"
    + "movl %0, %2 ;\n"
    + "shrl $16, %0 ;\n"
    + "addw %w2, %w0 ;\n"
    + "adcl $0, %0 ;\n"
    + "notl %0 ;\n"
    + "2: ;\n"
    /* Since the input registers which are loaded with iph and ihl
    are modified, we must also specify them as outputs, or gcc
    will assume they contain their original values. */
    - : "=r" (sum), "=r" (iph), "=r" (ihl)
    - : "1" (iph), "2" (ihl)
    - : "memory");
    + : "=r" (sum), "=r" (iph), "=r" (ihl)
    + : "1" (iph), "2" (ihl)
    + : "memory");
    return (__force __sum16)sum;
    }

    @@ -97,29 +97,27 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)

    static inline __sum16 csum_fold(__wsum sum)
    {
    - __asm__(
    - "addl %1, %0 ;\n"
    - "adcl $0xffff, %0 ;\n"
    - : "=r" (sum)
    - : "r" ((__force u32)sum << 16),
    - "0" ((__force u32)sum & 0xffff0000)
    - );
    + asm("addl %1, %0 ;\n"
    + "adcl $0xffff, %0 ;\n"
    + : "=r" (sum)
    + : "r" ((__force u32)sum << 16),
    + "0" ((__force u32)sum & 0xffff0000));
    return (__force __sum16)(~(__force u32)sum >> 16);
    }

    static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
    - unsigned short len,
    - unsigned short proto,
    - __wsum sum)
    + unsigned short len,
    + unsigned short proto,
    + __wsum sum)
    {
    - __asm__(
    - "addl %1, %0 ;\n"
    - "adcl %2, %0 ;\n"
    - "adcl %3, %0 ;\n"
    - "adcl $0, %0 ;\n"
    - : "=r" (sum)
    - : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum));
    - return sum;
    + asm("addl %1, %0 ;\n"
    + "adcl %2, %0 ;\n"
    + "adcl %3, %0 ;\n"
    + "adcl $0, %0 ;\n"
    + : "=r" (sum)
    + : "g" (daddr), "g"(saddr),
    + "g" ((len + proto) << 8), "0" (sum));
    + return sum;
    }

    /*
    @@ -127,11 +125,11 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
    * returns a 16-bit checksum, already complemented
    */
    static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
    - unsigned short len,
    - unsigned short proto,
    - __wsum sum)
    + unsigned short len,
    + unsigned short proto,
    + __wsum sum)
    {
    - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto ,sum));
    + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
    }

    /*
    @@ -141,30 +139,29 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,

    static inline __sum16 ip_compute_csum(const void *buff, int len)
    {
    - return csum_fold (csum_partial(buff, len, 0));
    + return csum_fold(csum_partial(buff, len, 0));
    }

    #define _HAVE_ARCH_IPV6_CSUM
    -static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
    - const struct in6_addr *daddr,
    - __u32 len, unsigned short proto,
    - __wsum sum)
    +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
    + const struct in6_addr *daddr,
    + __u32 len, unsigned short proto,
    + __wsum sum)
    {
    - __asm__(
    - "addl 0(%1), %0 ;\n"
    - "adcl 4(%1), %0 ;\n"
    - "adcl 8(%1), %0 ;\n"
    - "adcl 12(%1), %0 ;\n"
    - "adcl 0(%2), %0 ;\n"
    - "adcl 4(%2), %0 ;\n"
    - "adcl 8(%2), %0 ;\n"
    - "adcl 12(%2), %0 ;\n"
    - "adcl %3, %0 ;\n"
    - "adcl %4, %0 ;\n"
    - "adcl $0, %0 ;\n"
    - : "=&r" (sum)
    - : "r" (saddr), "r" (daddr),
    - "r"(htonl(len)), "r"(htonl(proto)), "0"(sum));
    + asm("addl 0(%1), %0 ;\n"
    + "adcl 4(%1), %0 ;\n"
    + "adcl 8(%1), %0 ;\n"
    + "adcl 12(%1), %0 ;\n"
    + "adcl 0(%2), %0 ;\n"
    + "adcl 4(%2), %0 ;\n"
    + "adcl 8(%2), %0 ;\n"
    + "adcl 12(%2), %0 ;\n"
    + "adcl %3, %0 ;\n"
    + "adcl %4, %0 ;\n"
    + "adcl $0, %0 ;\n"
    + : "=&r" (sum)
    + : "r" (saddr), "r" (daddr),
    + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));

    return csum_fold(sum);
    }
    @@ -173,14 +170,15 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
    * Copy and checksum to user
    */
    #define HAVE_CSUM_COPY_USER
    -static __inline__ __wsum csum_and_copy_to_user(const void *src,
    - void __user *dst,
    - int len, __wsum sum,
    - int *err_ptr)
    +static inline __wsum csum_and_copy_to_user(const void *src,
    + void __user *dst,
    + int len, __wsum sum,
    + int *err_ptr)
    {
    might_sleep();
    if (access_ok(VERIFY_WRITE, dst, len))
    - return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
    + return csum_partial_copy_generic(src, (__force void *)dst,
    + len, sum, NULL, err_ptr);

    if (len)
    *err_ptr = -EFAULT;
    --
    1.5.4.rc2


  4. [PATCH 080/148] include/asm-x86/numa_64.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/numa_64.h | 5 +++--
    1 files changed, 3 insertions(+), 2 deletions(-)

    diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
    index 15fe07c..32c22ae 100644
    --- a/include/asm-x86/numa_64.h
    +++ b/include/asm-x86/numa_64.h
    @@ -1,11 +1,12 @@
    -#ifndef _ASM_X8664_NUMA_H
    +#ifndef _ASM_X8664_NUMA_H
    #define _ASM_X8664_NUMA_H 1

    #include
    #include

    struct bootnode {
    - u64 start,end;
    + u64 start;
    + u64 end;
    };

    extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
    --
    1.5.4.rc2


  5. [PATCH 097/148] include/asm-x86/pgtable.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/pgtable.h | 197 ++++++++++++++++++++++++++++++++-------------
    1 files changed, 142 insertions(+), 55 deletions(-)

    diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
    index 0e31e72..2533f79 100644
    --- a/include/asm-x86/pgtable.h
    +++ b/include/asm-x86/pgtable.h
    @@ -18,27 +18,28 @@
    #define _PAGE_BIT_UNUSED2 10
    #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
    #define _PAGE_BIT_HIDDEN 11
    -#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
    +#define _PAGE_BIT_NX 63 /* No execute:
    + * only valid after cpuid check */

    /*
    * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
    * sign-extended value on 32-bit with all 1's in the upper word,
    * which preserves the upper pte values on 64-bit ptes:
    */
    -#define _PAGE_PRESENT (_AC(1, L)<<_PAGE_BIT_PRESENT)
    -#define _PAGE_RW (_AC(1, L)<<_PAGE_BIT_RW)
    -#define _PAGE_USER (_AC(1, L)<<_PAGE_BIT_USER)
    -#define _PAGE_PWT (_AC(1, L)<<_PAGE_BIT_PWT)
    -#define _PAGE_PCD (_AC(1, L)<<_PAGE_BIT_PCD)
    -#define _PAGE_ACCESSED (_AC(1, L)<<_PAGE_BIT_ACCESSED)
    -#define _PAGE_DIRTY (_AC(1, L)<<_PAGE_BIT_DIRTY)
    -#define _PAGE_PSE (_AC(1, L)<<_PAGE_BIT_PSE) /* 2MB page */
    -#define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */
    -#define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1)
    -#define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2)
    -#define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT)
    -#define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
    -#define _PAGE_HIDDEN (_AC(1, L)<<_PAGE_BIT_HIDDEN)
    +#define _PAGE_PRESENT (_AC(1, L) << _PAGE_BIT_PRESENT)
    +#define _PAGE_RW (_AC(1, L) << _PAGE_BIT_RW)
    +#define _PAGE_USER (_AC(1, L) << _PAGE_BIT_USER)
    +#define _PAGE_PWT (_AC(1, L) << _PAGE_BIT_PWT)
    +#define _PAGE_PCD (_AC(1, L) << _PAGE_BIT_PCD)
    +#define _PAGE_ACCESSED (_AC(1, L) << _PAGE_BIT_ACCESSED)
    +#define _PAGE_DIRTY (_AC(1, L) << _PAGE_BIT_DIRTY)
    +#define _PAGE_PSE (_AC(1, L) << _PAGE_BIT_PSE) /* 2MB page */
    +#define _PAGE_GLOBAL (_AC(1, L) << _PAGE_BIT_GLOBAL) /* Global TLB entry */
    +#define _PAGE_UNUSED1 (_AC(1, L) << _PAGE_BIT_UNUSED1)
    +#define _PAGE_UNUSED2 (_AC(1, L) << _PAGE_BIT_UNUSED2)
    +#define _PAGE_PAT (_AC(1, L) << _PAGE_BIT_PAT)
    +#define _PAGE_PAT_LARGE (_AC(1, L) << _PAGE_BIT_PAT_LARGE)
    +#define _PAGE_HIDDEN (_AC(1, L) << _PAGE_BIT_HIDDEN)

    #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
    #define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX)
    @@ -47,12 +48,15 @@
    #endif

    /* If _PAGE_PRESENT is clear, we use these: */
    -#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */
    +#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping,
    + * saved PTE; unset:swap */
    #define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
    pte_present gives true */

    -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
    -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
    +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
    + _PAGE_ACCESSED | _PAGE_DIRTY)
    +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
    + _PAGE_DIRTY)

    #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

    @@ -63,14 +67,20 @@
    #define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)

    #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
    -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
    -
    -#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
    -#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
    -#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
    +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
    + _PAGE_ACCESSED | _PAGE_NX)
    +
    +#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
    + _PAGE_USER | _PAGE_ACCESSED)
    +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
    + _PAGE_ACCESSED | _PAGE_NX)
    +#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
    + _PAGE_ACCESSED)
    #define PAGE_COPY PAGE_COPY_NOEXEC
    -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
    -#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
    +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
    + _PAGE_ACCESSED | _PAGE_NX)
    +#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
    + _PAGE_ACCESSED)

    #ifdef CONFIG_X86_32
    #define _PAGE_KERNEL_EXEC \
    @@ -88,11 +98,13 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;

    #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
    #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
    -#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
    +#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | \
    + _PAGE_PWT)
    #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
    #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
    #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
    -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
    +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | \
    + _PAGE_PWT)
    #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
    #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

    @@ -139,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
    * ZERO_PAGE is a global shared page that is always zero: used
    * for zero-mapped memory areas etc..
    */
    -extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
    +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
    #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

    extern spinlock_t pgd_lock;
    @@ -149,30 +161,101 @@ extern struct list_head pgd_list;
    * The following only work if pte_present() is true.
    * Undefined behaviour if not..
    */
    -static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
    -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
    -static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
    -static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
    -static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
    -static inline int pte_global(pte_t pte) { return pte_val(pte) & _PAGE_GLOBAL; }
    -static inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_NX); }
    -
    -static inline int pmd_large(pmd_t pte) {
    - return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
    - (_PAGE_PSE|_PAGE_PRESENT);
    +static inline int pte_dirty(pte_t pte)
    +{
    + return pte_val(pte) & _PAGE_DIRTY;
    +}
    +
    +static inline int pte_young(pte_t pte)
    +{
    + return pte_val(pte) & _PAGE_ACCESSED;
    +}
    +
    +static inline int pte_write(pte_t pte)
    +{
    + return pte_val(pte) & _PAGE_RW;
    +}
    +
    +static inline int pte_file(pte_t pte)
    +{
    + return pte_val(pte) & _PAGE_FILE;
    +}
    +
    +static inline int pte_huge(pte_t pte)
    +{
    + return pte_val(pte) & _PAGE_PSE;
    }

    -static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
    -static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
    -static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
    -static inline pte_t pte_mkexec(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
    -static inline pte_t pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | _PAGE_DIRTY); }
    -static inline pte_t pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | _PAGE_ACCESSED); }
    -static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); }
    -static inline pte_t pte_mkhuge(pte_t pte) { return __pte(pte_val(pte) | _PAGE_PSE); }
    -static inline pte_t pte_clrhuge(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
    -static inline pte_t pte_mkglobal(pte_t pte) { return __pte(pte_val(pte) | _PAGE_GLOBAL); }
    -static inline pte_t pte_clrglobal(pte_t pte) { return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }
    +static inline int pte_global(pte_t pte)
    +{
    + return pte_val(pte) & _PAGE_GLOBAL;
    +}
    +
    +static inline int pte_exec(pte_t pte)
    +{
    + return !(pte_val(pte) & _PAGE_NX);
    +}
    +
    +static inline int pmd_large(pmd_t pte)
    +{
    + return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
    + (_PAGE_PSE | _PAGE_PRESENT);
    +}
    +
    +static inline pte_t pte_mkclean(pte_t pte)
    +{
    + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
    +}
    +
    +static inline pte_t pte_mkold(pte_t pte)
    +{
    + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
    +}
    +
    +static inline pte_t pte_wrprotect(pte_t pte)
    +{
    + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
    +}
    +
    +static inline pte_t pte_mkexec(pte_t pte)
    +{
    + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
    +}
    +
    +static inline pte_t pte_mkdirty(pte_t pte)
    +{
    + return __pte(pte_val(pte) | _PAGE_DIRTY);
    +}
    +
    +static inline pte_t pte_mkyoung(pte_t pte)
    +{
    + return __pte(pte_val(pte) | _PAGE_ACCESSED);
    +}
    +
    +static inline pte_t pte_mkwrite(pte_t pte)
    +{
    + return __pte(pte_val(pte) | _PAGE_RW);
    +}
    +
    +static inline pte_t pte_mkhuge(pte_t pte)
    +{
    + return __pte(pte_val(pte) | _PAGE_PSE);
    +}
    +
    +static inline pte_t pte_clrhuge(pte_t pte)
    +{
    + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
    +}
    +
    +static inline pte_t pte_mkglobal(pte_t pte)
    +{
    + return __pte(pte_val(pte) | _PAGE_GLOBAL);
    +}
    +
    +static inline pte_t pte_clrglobal(pte_t pte)
    +{
    + return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
    +}

    extern pteval_t __supported_pte_mask;

    @@ -210,9 +293,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
    #define __HAVE_PHYS_MEM_ACCESS_PROT
    struct file;
    pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
    - unsigned long size, pgprot_t vma_prot);
    + unsigned long size, pgprot_t vma_prot);
    int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
    - unsigned long size, pgprot_t *vma_prot);
    + unsigned long size, pgprot_t *vma_prot);
    #endif

    #ifdef CONFIG_PARAVIRT
    @@ -331,7 +414,8 @@ extern int ptep_clear_flush_young(struct vm_area_struct *vma,
    unsigned long address, pte_t *ptep);

    #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
    -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
    +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
    + pte_t *ptep)
    {
    pte_t pte = native_ptep_get_and_clear(ptep);
    pte_update(mm, addr, ptep);
    @@ -339,7 +423,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
    }

    #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
    -static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
    +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
    + unsigned long addr, pte_t *ptep,
    + int full)
    {
    pte_t pte;
    if (full) {
    @@ -355,7 +441,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
    }

    #define __HAVE_ARCH_PTEP_SET_WRPROTECT
    -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
    +static inline void ptep_set_wrprotect(struct mm_struct *mm,
    + unsigned long addr, pte_t *ptep)
    {
    clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
    pte_update(mm, addr, ptep);
    --
    1.5.4.rc2
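
    As a usage note, the expanded one-helper-per-line style above does not
    change any call site; the helpers still compose as before, e.g.
    (illustrative):

    pte = pte_mkwrite(pte_mkdirty(pte_mkyoung(pte)));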


  6. [PATCH 065/148] include/asm-x86/mc146818rtc.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/mc146818rtc.h | 16 ++++++++++------
    1 files changed, 10 insertions(+), 6 deletions(-)

    diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
    index cdd9f96..daf1ccd 100644
    --- a/include/asm-x86/mc146818rtc.h
    +++ b/include/asm-x86/mc146818rtc.h
    @@ -42,7 +42,7 @@ extern volatile unsigned long cmos_lock;
    static inline void lock_cmos(unsigned char reg)
    {
    unsigned long new;
    - new = ((smp_processor_id()+1) << 8) | reg;
    + new = ((smp_processor_id() + 1) << 8) | reg;
    for (;;) {
    if (cmos_lock) {
    cpu_relax();
    @@ -57,22 +57,26 @@ static inline void unlock_cmos(void)
    {
    cmos_lock = 0;
    }
    +
    static inline int do_i_have_lock_cmos(void)
    {
    - return (cmos_lock >> 8) == (smp_processor_id()+1);
    + return (cmos_lock >> 8) == (smp_processor_id() + 1);
    }
    +
    static inline unsigned char current_lock_cmos_reg(void)
    {
    return cmos_lock & 0xff;
    }
    -#define lock_cmos_prefix(reg) \
    +
    +#define lock_cmos_prefix(reg) \
    do { \
    unsigned long cmos_flags; \
    local_irq_save(cmos_flags); \
    lock_cmos(reg)
    -#define lock_cmos_suffix(reg) \
    - unlock_cmos(); \
    - local_irq_restore(cmos_flags); \
    +
    +#define lock_cmos_suffix(reg) \
    + unlock_cmos(); \
    + local_irq_restore(cmos_flags); \
    } while (0)
    #else
    #define lock_cmos_prefix(reg) do {} while (0)
    --
    1.5.4.rc2
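
    The prefix/suffix pair above is unusual in that a single do/while
    block is opened by one macro and closed by the other; a sketch of the
    intended call pattern (variable names illustrative):

    unsigned char value;

    lock_cmos_prefix(reg);          /* opens do { and takes the lock */
    outb(reg, RTC_PORT(0));
    value = inb(RTC_PORT(1));
    lock_cmos_suffix(reg);          /* unlocks and closes } while (0) */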


  7. [PATCH 063/148] include/asm-x86/linkage.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/linkage.h | 2 +-
    1 files changed, 1 insertions(+), 1 deletions(-)

    diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
    index 4e1c2ca..3c7b440 100644
    --- a/include/asm-x86/linkage.h
    +++ b/include/asm-x86/linkage.h
    @@ -11,7 +11,7 @@

    #ifdef CONFIG_X86_32
    #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
    -#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
    +#define prevent_tail_call(ret) asm("" : "=r" (ret) : "0" (ret))
    /*
    * For 32-bit UML - mark functions implemented in assembly that use
    * regparm input parameters:
    --
    1.5.4.rc2


  8. [PATCH 082/148] include/asm-x86/page_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/page_32.h | 9 ++++++---
    1 files changed, 6 insertions(+), 3 deletions(-)

    diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
    index 5f7257f..424e82f 100644
    --- a/include/asm-x86/page_32.h
    +++ b/include/asm-x86/page_32.h
    @@ -47,7 +47,10 @@ typedef unsigned long pgdval_t;
    typedef unsigned long pgprotval_t;
    typedef unsigned long phys_addr_t;

    -typedef union { pteval_t pte, pte_low; } pte_t;
    +typedef union {
    + pteval_t pte;
    + pteval_t pte_low;
    +} pte_t;

    #endif /* __ASSEMBLY__ */
    #endif /* CONFIG_X86_PAE */
    @@ -61,7 +64,7 @@ typedef struct page *pgtable_t;
    #endif

    #ifndef __ASSEMBLY__
    -#define __phys_addr(x) ((x)-PAGE_OFFSET)
    +#define __phys_addr(x) ((x) - PAGE_OFFSET)
    #define __phys_reloc_hide(x) RELOC_HIDE((x), 0)

    #ifdef CONFIG_FLATMEM
    @@ -78,7 +81,7 @@ extern unsigned int __VMALLOC_RESERVE;
    extern int sysctl_legacy_va_layout;

    #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
    -#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
    +#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)

    #ifdef CONFIG_X86_USE_3DNOW
    #include
    --
    1.5.4.rc2


  9. [PATCH 059/148] include/asm-x86/kvm_host.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/kvm_host.h | 24 +++++++++++++-----------
    1 files changed, 13 insertions(+), 11 deletions(-)

    diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
    index 4702b04..68ee390 100644
    --- a/include/asm-x86/kvm_host.h
    +++ b/include/asm-x86/kvm_host.h
    @@ -22,15 +22,16 @@

    #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
    #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
    -#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
    +#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
    + 0xFFFFFF0000000000ULL)

    -#define KVM_GUEST_CR0_MASK \
    +#define KVM_GUEST_CR0_MASK \
    (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
    | X86_CR0_NW | X86_CR0_CD)
    -#define KVM_VM_CR0_ALWAYS_ON \
    +#define KVM_VM_CR0_ALWAYS_ON \
    (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
    | X86_CR0_MP)
    -#define KVM_GUEST_CR4_MASK \
    +#define KVM_GUEST_CR4_MASK \
    (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
    #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
    #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
    @@ -133,12 +134,12 @@ struct kvm_pte_chain {
    union kvm_mmu_page_role {
    unsigned word;
    struct {
    - unsigned glevels : 4;
    - unsigned level : 4;
    - unsigned quadrant : 2;
    - unsigned pad_for_nice_hex_output : 6;
    - unsigned metaphysical : 1;
    - unsigned access : 3;
    + unsigned glevels:4;
    + unsigned level:4;
    + unsigned quadrant:2;
    + unsigned pad_for_nice_hex_output:6;
    + unsigned metaphysical:1;
    + unsigned access:3;
    };
    };

    @@ -606,6 +607,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
    #define TSS_BASE_SIZE 0x68
    #define TSS_IOPB_SIZE (65536 / 8)
    #define TSS_REDIRECTION_SIZE (256 / 8)
    -#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
    +#define RMODE_TSS_SIZE \
    + (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

    #endif
    --
    1.5.4.rc2


  10. [PATCH 085/148] include/asm-x86/paravirt.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/paravirt.h | 47 ++++++++++++++++++++++++++-----------------
    1 files changed, 28 insertions(+), 19 deletions(-)

    diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
    index 3433c12..a7f046f 100644
    --- a/include/asm-x86/paravirt.h
    +++ b/include/asm-x86/paravirt.h
    @@ -233,7 +233,8 @@ struct pv_mmu_ops {
    void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
    pte_t *ptep, pte_t pteval);
    void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
    - void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
    + void (*pte_update)(struct mm_struct *mm, unsigned long addr,
    + pte_t *ptep);
    void (*pte_update_defer)(struct mm_struct *mm,
    unsigned long addr, pte_t *ptep);

    @@ -248,7 +249,8 @@ struct pv_mmu_ops {
    void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
    void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
    pte_t *ptep, pte_t pte);
    - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
    + void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
    + pte_t *ptep);
    void (*pmd_clear)(pmd_t *pmdp);

    #endif /* CONFIG_X86_PAE */
    @@ -276,8 +278,7 @@ struct pv_mmu_ops {
    /* This contains all the paravirt structures: we get a convenient
    * number for each function using the offset which we use to indicate
    * what to patch. */
    -struct paravirt_patch_template
    -{
    +struct paravirt_patch_template {
    struct pv_init_ops pv_init_ops;
    struct pv_time_ops pv_time_ops;
    struct pv_cpu_ops pv_cpu_ops;
    @@ -662,32 +663,37 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
    }

    /* These should all do BUG_ON(_err), but our headers are too tangled. */
    -#define rdmsr(msr,val1,val2) do { \
    +#define rdmsr(msr, val1, val2) \
    +do { \
    int _err; \
    u64 _l = paravirt_read_msr(msr, &_err); \
    val1 = (u32)_l; \
    val2 = _l >> 32; \
    -} while(0)
    +} while (0)

    -#define wrmsr(msr,val1,val2) do { \
    +#define wrmsr(msr, val1, val2) \
    +do { \
    paravirt_write_msr(msr, val1, val2); \
    -} while(0)
    +} while (0)

    -#define rdmsrl(msr,val) do { \
    +#define rdmsrl(msr, val) \
    +do { \
    int _err; \
    val = paravirt_read_msr(msr, &_err); \
    -} while(0)
    +} while (0)

    -#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
    -#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b)
    +#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
    +#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)

    /* rdmsr with exception handling */
    -#define rdmsr_safe(msr,a,b) ({ \
    +#define rdmsr_safe(msr, a, b) \
    +({ \
    int _err; \
    u64 _l = paravirt_read_msr(msr, &_err); \
    (*a) = (u32)_l; \
    (*b) = _l >> 32; \
    - _err; })
    + _err; \
    +})


    static inline u64 paravirt_read_tsc(void)
    @@ -695,10 +701,11 @@ static inline u64 paravirt_read_tsc(void)
    return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
    }

    -#define rdtscl(low) do { \
    +#define rdtscl(low) \
    +do { \
    u64 _l = paravirt_read_tsc(); \
    low = (int)_l; \
    -} while(0)
    +} while (0)

    #define rdtscll(val) (val = paravirt_read_tsc())

    @@ -713,11 +720,12 @@ static inline unsigned long long paravirt_read_pmc(int counter)
    return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
    }

    -#define rdpmc(counter,low,high) do { \
    +#define rdpmc(counter, low, high) \
    +do { \
    u64 _l = paravirt_read_pmc(counter); \
    low = (u32)_l; \
    high = _l >> 32; \
    -} while(0)
    +} while (0)

    static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
    {
    @@ -796,7 +804,8 @@ static inline void set_iopl_mask(unsigned mask)
    }

    /* The paravirtualized I/O functions */
    -static inline void slow_down_io(void) {
    +static inline void slow_down_io(void)
    +{
    pv_cpu_ops.io_delay();
    #ifdef REALLY_SLOW_IO
    pv_cpu_ops.io_delay();
    --
    1.5.4.rc2
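
    The while(0) -> while (0) changes are cosmetic, but the do/while (0)
    shape itself is what lets these macros behave as single statements; a
    sketch (MSR constant real, surrounding condition illustrative):

    unsigned lo, hi;

    if (want_apic_enabled)
            rdmsr(MSR_IA32_APICBASE, lo, hi);
    else
            lo = hi = 0;            /* compiles only because rdmsr()
                                       expands to one statement */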


  11. [PATCH 079/148] include/asm-x86/mutex_64.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/mutex_64.h | 73 ++++++++++++++++++++-----------------------
    1 files changed, 34 insertions(+), 39 deletions(-)

    diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
    index 6c2949a..f3fae9b 100644
    --- a/include/asm-x86/mutex_64.h
    +++ b/include/asm-x86/mutex_64.h
    @@ -16,23 +16,21 @@
    *
    * Atomically decrements @v and calls if the result is negative.
    */
    -#define __mutex_fastpath_lock(v, fail_fn) \
    -do { \
    - unsigned long dummy; \
    - \
    - typecheck(atomic_t *, v); \
    - typecheck_fn(void (*)(atomic_t *), fail_fn); \
    - \
    - __asm__ __volatile__( \
    - LOCK_PREFIX " decl (%%rdi) \n" \
    - " jns 1f \n" \
    - " call "#fail_fn" \n" \
    - "1:" \
    - \
    - :"=D" (dummy) \
    - : "D" (v) \
    - : "rax", "rsi", "rdx", "rcx", \
    - "r8", "r9", "r10", "r11", "memory"); \
    +#define __mutex_fastpath_lock(v, fail_fn) \
    +do { \
    + unsigned long dummy; \
    + \
    + typecheck(atomic_t *, v); \
    + typecheck_fn(void (*)(atomic_t *), fail_fn); \
    + \
    + asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
    + " jns 1f \n" \
    + " call " #fail_fn "\n" \
    + "1:" \
    + : "=D" (dummy) \
    + : "D" (v) \
    + : "rax", "rsi", "rdx", "rcx", \
    + "r8", "r9", "r10", "r11", "memory"); \
    } while (0)

    /**
    @@ -45,9 +43,8 @@ do { \
    * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
    * or anything the slow path function returns
    */
    -static inline int
    -__mutex_fastpath_lock_retval(atomic_t *count,
    - int (*fail_fn)(atomic_t *))
    +static inline int __mutex_fastpath_lock_retval(atomic_t *count,
    + int (*fail_fn)(atomic_t *))
    {
    if (unlikely(atomic_dec_return(count) < 0))
    return fail_fn(count);
    @@ -62,23 +59,21 @@ __mutex_fastpath_lock_retval(atomic_t *count,
    *
    * Atomically increments @v and calls if the result is nonpositive.
    */
    -#define __mutex_fastpath_unlock(v, fail_fn) \
    -do { \
    - unsigned long dummy; \
    - \
    - typecheck(atomic_t *, v); \
    - typecheck_fn(void (*)(atomic_t *), fail_fn); \
    - \
    - __asm__ __volatile__( \
    - LOCK_PREFIX " incl (%%rdi) \n" \
    - " jg 1f \n" \
    - " call "#fail_fn" \n" \
    - "1: " \
    - \
    - :"=D" (dummy) \
    - : "D" (v) \
    - : "rax", "rsi", "rdx", "rcx", \
    - "r8", "r9", "r10", "r11", "memory"); \
    +#define __mutex_fastpath_unlock(v, fail_fn) \
    +do { \
    + unsigned long dummy; \
    + \
    + typecheck(atomic_t *, v); \
    + typecheck_fn(void (*)(atomic_t *), fail_fn); \
    + \
    + asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
    + " jg 1f\n" \
    + " call " #fail_fn "\n" \
    + "1:" \
    + : "=D" (dummy) \
    + : "D" (v) \
    + : "rax", "rsi", "rdx", "rcx", \
    + "r8", "r9", "r10", "r11", "memory"); \
    } while (0)

    #define __mutex_slowpath_needs_to_unlock() 1
    @@ -93,8 +88,8 @@ do { \
    * if it wasn't 1 originally. [the fallback function is never used on
    * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
    */
    -static inline int
    -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
    +static inline int __mutex_fastpath_trylock(atomic_t *count,
    + int (*fail_fn)(atomic_t *))
    {
    if (likely(atomic_cmpxchg(count, 1, 0) == 1))
    return 1;
    --
    1.5.4.rc2


  12. [PATCH 048/148] include/asm-x86/io_64.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/io_64.h | 110 +++++++++++++++++++++++++++++------------------
    1 files changed, 68 insertions(+), 42 deletions(-)

    diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
    index 084d60c..c3346bf 100644
    --- a/include/asm-x86/io_64.h
    +++ b/include/asm-x86/io_64.h
    @@ -58,60 +58,75 @@ static inline void slow_down_io(void)
    /*
    * Talk about misusing macros..
    */
    -#define __OUT1(s,x) \
    +#define __OUT1(s, x) \
    static inline void out##s(unsigned x value, unsigned short port) {

    -#define __OUT2(s,s1,s2) \
    -__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
    +#define __OUT2(s, s1, s2) \
    +asm volatile ("out" #s " %" s1 "0,%" s2 "1"

    #ifndef REALLY_SLOW_IO
    #define REALLY_SLOW_IO
    #define UNSET_REALLY_SLOW_IO
    #endif

    -#define __OUT(s,s1,x) \
    -__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
    -__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
    - slow_down_io(); }
    +#define __OUT(s, s1, x) \
    + __OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
    + } \
    + __OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
    + slow_down_io(); \
    +}

    -#define __IN1(s) \
    -static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
    +#define __IN1(s) \
    +static inline RETURN_TYPE in##s(unsigned short port) \
    +{ \
    + RETURN_TYPE _v;

    -#define __IN2(s,s1,s2) \
    -__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
    +#define __IN2(s, s1, s2) \
    + asm volatile ("in" #s " %" s2 "1,%" s1 "0"

    -#define __IN(s,s1,i...) \
    -__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); return _v; } \
    -__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
    - slow_down_io(); return _v; }
    +#define __IN(s, s1, i...) \
    + __IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
    + return _v; \
    + } \
    + __IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
    + slow_down_io(); \
    + return _v; }

    #ifdef UNSET_REALLY_SLOW_IO
    #undef REALLY_SLOW_IO
    #endif

    -#define __INS(s) \
    -static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
    -{ __asm__ __volatile__ ("rep ; ins" #s \
    -: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
    +#define __INS(s) \
    +static inline void ins##s(unsigned short port, void *addr, \
    + unsigned long count) \
    +{ \
    + asm volatile ("rep ; ins" #s \
    + : "=D" (addr), "=c" (count) \
    + : "d" (port), "0" (addr), "1" (count)); \
    +}

    -#define __OUTS(s) \
    -static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
    -{ __asm__ __volatile__ ("rep ; outs" #s \
    -: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
    +#define __OUTS(s) \
    +static inline void outs##s(unsigned short port, const void *addr, \
    + unsigned long count) \
    +{ \
    + asm volatile ("rep ; outs" #s \
    + : "=S" (addr), "=c" (count) \
    + : "d" (port), "0" (addr), "1" (count)); \
    +}

    #define RETURN_TYPE unsigned char
    -__IN(b,"")
    +__IN(b, "")
    #undef RETURN_TYPE
    #define RETURN_TYPE unsigned short
    -__IN(w,"")
    +__IN(w, "")
    #undef RETURN_TYPE
    #define RETURN_TYPE unsigned int
    -__IN(l,"")
    +__IN(l, "")
    #undef RETURN_TYPE

    -__OUT(b,"b",char)
    -__OUT(w,"w",short)
    -__OUT(l,,int)
    +__OUT(b, "b", char)
    +__OUT(w, "w", short)
    +__OUT(l, , int)

    __INS(b)
    __INS(w)
    @@ -132,12 +147,12 @@ __OUTS(l)
    * Change virtual addresses to physical addresses and vv.
    * These are pretty trivial
    */
    -static inline unsigned long virt_to_phys(volatile void * address)
    +static inline unsigned long virt_to_phys(volatile void *address)
    {
    return __pa(address);
    }

    -static inline void * phys_to_virt(unsigned long address)
    +static inline void *phys_to_virt(unsigned long address)
    {
    return __va(address);
    }
    @@ -200,18 +215,22 @@ static inline __u8 __readb(const volatile void __iomem *addr)
    {
    return *(__force volatile __u8 *)addr;
    }
    +
    static inline __u16 __readw(const volatile void __iomem *addr)
    {
    return *(__force volatile __u16 *)addr;
    }
    +
    static __always_inline __u32 __readl(const volatile void __iomem *addr)
    {
    return *(__force volatile __u32 *)addr;
    }
    +
    static inline __u64 __readq(const volatile void __iomem *addr)
    {
    return *(__force volatile __u64 *)addr;
    }
    +
    #define readb(x) __readb(x)
    #define readw(x) __readw(x)
    #define readl(x) __readl(x)
    @@ -231,37 +250,44 @@ static inline void __writel(__u32 b, volatile void __iomem *addr)
    {
    *(__force volatile __u32 *)addr = b;
    }
    +
    static inline void __writeq(__u64 b, volatile void __iomem *addr)
    {
    *(__force volatile __u64 *)addr = b;
    }
    +
    static inline void __writeb(__u8 b, volatile void __iomem *addr)
    {
    *(__force volatile __u8 *)addr = b;
    }
    +
    static inline void __writew(__u16 b, volatile void __iomem *addr)
    {
    *(__force volatile __u16 *)addr = b;
    }
    -#define writeq(val,addr) __writeq((val),(addr))
    -#define writel(val,addr) __writel((val),(addr))
    -#define writew(val,addr) __writew((val),(addr))
    -#define writeb(val,addr) __writeb((val),(addr))
    +
    +#define writeq(val, addr) __writeq((val), (addr))
    +#define writel(val, addr) __writel((val), (addr))
    +#define writew(val, addr) __writew((val), (addr))
    +#define writeb(val, addr) __writeb((val), (addr))
    #define __raw_writeb writeb
    #define __raw_writew writew
    #define __raw_writel writel
    #define __raw_writeq writeq

    -void __memcpy_fromio(void*,unsigned long,unsigned);
    -void __memcpy_toio(unsigned long,const void*,unsigned);
    +void __memcpy_fromio(void *, unsigned long, unsigned);
    +void __memcpy_toio(unsigned long, const void *, unsigned);

    -static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
    +static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
    + unsigned len)
    {
    - __memcpy_fromio(to,(unsigned long)from,len);
    + __memcpy_fromio(to, (unsigned long)from, len);
    }
    -static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
    +
    +static inline void memcpy_toio(volatile void __iomem *to, const void *from,
    + unsigned len)
    {
    - __memcpy_toio((unsigned long)to,from,len);
    + __memcpy_toio((unsigned long)to, from, len);
    }

    void memset_io(volatile void __iomem *a, int b, size_t c);
    @@ -276,7 +302,7 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
    */
    #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

    -#define flush_write_buffers()
    +#define flush_write_buffers()

    extern int iommu_bio_merge;
    #define BIO_VMERGE_BOUNDARY iommu_bio_merge
    --
    1.5.4.rc2
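
    To make the macro machinery concrete, this is roughly what
    __IN(b, "") expands to after the cleanup (hand expansion, modulo
    whitespace; the in##s##_p variant additionally calls slow_down_io()):

    static inline unsigned char inb(unsigned short port)
    {
            unsigned char _v;

            asm volatile("inb %w1,%0" : "=a" (_v) : "Nd" (port));
            return _v;
    }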


  13. [PATCH 062/148] include/asm-x86/lguest.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/lguest.h | 11 ++++-------
    1 files changed, 4 insertions(+), 7 deletions(-)

    diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
    index 9b17571..be4a724 100644
    --- a/include/asm-x86/lguest.h
    +++ b/include/asm-x86/lguest.h
    @@ -34,8 +34,7 @@ extern const char lgstart_iret[], lgend_iret[];
    extern void lguest_iret(void);
    extern void lguest_init(void);

    -struct lguest_regs
    -{
    +struct lguest_regs {
    /* Manually saved part. */
    unsigned long eax, ebx, ecx, edx;
    unsigned long esi, edi, ebp;
    @@ -51,8 +50,7 @@ struct lguest_regs
    };

    /* This is a guest-specific page (mapped ro) into the guest. */
    -struct lguest_ro_state
    -{
    +struct lguest_ro_state {
    /* Host information we need to restore when we switch back. */
    u32 host_cr3;
    struct desc_ptr host_idt_desc;
    @@ -67,8 +65,7 @@ struct lguest_ro_state
    struct desc_struct guest_gdt[GDT_ENTRIES];
    };

    -struct lg_cpu_arch
    -{
    +struct lg_cpu_arch {
    /* The GDT entries copied into lguest_ro_state when running. */
    struct desc_struct gdt[GDT_ENTRIES];

    @@ -85,7 +82,7 @@ static inline void lguest_set_ts(void)

    cr0 = read_cr0();
    if (!(cr0 & 8))
    - write_cr0(cr0|8);
    + write_cr0(cr0 | 8);
    }

    /* Full 4G segment descriptors, suitable for CS and DS. */
    --
    1.5.4.rc2


  14. [PATCH 098/148] include/asm-x86/posix_types_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/posix_types_32.h | 47 +++++++++++++++++++++----------------
    1 files changed, 27 insertions(+), 20 deletions(-)

    diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
    index 015e539..b031efd 100644
    --- a/include/asm-x86/posix_types_32.h
    +++ b/include/asm-x86/posix_types_32.h
    @@ -45,32 +45,39 @@ typedef struct {
    #if defined(__KERNEL__)

    #undef __FD_SET
    -#define __FD_SET(fd,fdsetp) \
    - __asm__ __volatile__("btsl %1,%0": \
    - "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
    +#define __FD_SET(fd,fdsetp) \
    + asm volatile("btsl %1,%0": \
    + "+m" (*(__kernel_fd_set *)(fdsetp)) \
    + : "r" ((int)(fd)))

    #undef __FD_CLR
    -#define __FD_CLR(fd,fdsetp) \
    - __asm__ __volatile__("btrl %1,%0": \
    - "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
    +#define __FD_CLR(fd,fdsetp) \
    + asm volatile("btrl %1,%0": \
    + "+m" (*(__kernel_fd_set *)(fdsetp)) \
    + : "r" ((int) (fd)))

    #undef __FD_ISSET
    -#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
    - unsigned char __result; \
    - __asm__ __volatile__("btl %1,%2 ; setb %0" \
    - :"=q" (__result) :"r" ((int) (fd)), \
    - "m" (*(__kernel_fd_set *) (fdsetp))); \
    - __result; }))
    +#define __FD_ISSET(fd,fdsetp) \
    + (__extension__ \
    + ({ \
    + unsigned char __result; \
    + asm volatile("btl %1,%2 ; setb %0" \
    + : "=q" (__result) \
    + : "r" ((int)(fd)), \
    + "m" (*(__kernel_fd_set *)(fdsetp))); \
    + __result; \
    +}))

    #undef __FD_ZERO
    -#define __FD_ZERO(fdsetp) \
    -do { \
    - int __d0, __d1; \
    - __asm__ __volatile__("cld ; rep ; stosl" \
    - :"=m" (*(__kernel_fd_set *) (fdsetp)), \
    - "=&c" (__d0), "=&D" (__d1) \
    - :"a" (0), "1" (__FDSET_LONGS), \
    - "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
    +#define __FD_ZERO(fdsetp) \
    +do { \
    + int __d0, __d1; \
    + asm volatile("cld ; rep ; stosl" \
    + : "=m" (*(__kernel_fd_set *)(fdsetp)), \
    + "=&c" (__d0), "=&D" (__d1) \
    + : "a" (0), "1" (__FDSET_LONGS), \
    + "2" ((__kernel_fd_set *)(fdsetp)) \
    + : "memory"); \
    } while (0)

    #endif /* defined(__KERNEL__) */
    --
    1.5.4.rc2
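
    For reference, a usage sketch of the reformatted macros (kernel-internal;
    the descriptor number is hypothetical). Each one compiles down to a single
    bit instruction on the fd_set bitmap:

        static void fd_bitmap_demo(void)
        {
                __kernel_fd_set watched;
                int fd = 4;                     /* hypothetical descriptor */

                __FD_ZERO(&watched);            /* cld; rep stosl clears every word */
                __FD_SET(fd, &watched);         /* btsl sets bit 4 */
                if (__FD_ISSET(fd, &watched))   /* btl; setb reads it back */
                        __FD_CLR(fd, &watched); /* btrl clears it again */
        }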


  15. [PATCH 064/148] include/asm-x86/local.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/local.h | 105 ++++++++++++++++++++++------------------------
    1 files changed, 50 insertions(+), 55 deletions(-)

    diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
    index f852c62..330a724 100644
    --- a/include/asm-x86/local.h
    +++ b/include/asm-x86/local.h
    @@ -18,32 +18,28 @@ typedef struct {

    static inline void local_inc(local_t *l)
    {
    - __asm__ __volatile__(
    - _ASM_INC "%0"
    - :"+m" (l->a.counter));
    + asm volatile(_ASM_INC "%0"
    + : "+m" (l->a.counter));
    }

    static inline void local_dec(local_t *l)
    {
    - __asm__ __volatile__(
    - _ASM_DEC "%0"
    - :"+m" (l->a.counter));
    + asm volatile(_ASM_DEC "%0"
    + : "+m" (l->a.counter));
    }

    static inline void local_add(long i, local_t *l)
    {
    - __asm__ __volatile__(
    - _ASM_ADD "%1,%0"
    - :"+m" (l->a.counter)
    - :"ir" (i));
    + asm volatile(_ASM_ADD "%1,%0"
    + : "+m" (l->a.counter)
    + : "ir" (i));
    }

    static inline void local_sub(long i, local_t *l)
    {
    - __asm__ __volatile__(
    - _ASM_SUB "%1,%0"
    - :"+m" (l->a.counter)
    - :"ir" (i));
    + asm volatile(_ASM_SUB "%1,%0"
    + : "+m" (l->a.counter)
    + : "ir" (i));
    }

    /**
    @@ -59,10 +55,9 @@ static inline int local_sub_and_test(long i, local_t *l)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - _ASM_SUB "%2,%0; sete %1"
    - :"+m" (l->a.counter), "=qm" (c)
    - :"ir" (i) : "memory");
    + asm volatile(_ASM_SUB "%2,%0; sete %1"
    + : "+m" (l->a.counter), "=qm" (c)
    + : "ir" (i) : "memory");
    return c;
    }

    @@ -78,10 +73,9 @@ static inline int local_dec_and_test(local_t *l)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - _ASM_DEC "%0; sete %1"
    - :"+m" (l->a.counter), "=qm" (c)
    - : : "memory");
    + asm volatile(_ASM_DEC "%0; sete %1"
    + : "+m" (l->a.counter), "=qm" (c)
    + : : "memory");
    return c != 0;
    }

    @@ -97,10 +91,9 @@ static inline int local_inc_and_test(local_t *l)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - _ASM_INC "%0; sete %1"
    - :"+m" (l->a.counter), "=qm" (c)
    - : : "memory");
    + asm volatile(_ASM_INC "%0; sete %1"
    + : "+m" (l->a.counter), "=qm" (c)
    + : : "memory");
    return c != 0;
    }

    @@ -117,10 +110,9 @@ static inline int local_add_negative(long i, local_t *l)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - _ASM_ADD "%2,%0; sets %1"
    - :"+m" (l->a.counter), "=qm" (c)
    - :"ir" (i) : "memory");
    + asm volatile(_ASM_ADD "%2,%0; sets %1"
    + : "+m" (l->a.counter), "=qm" (c)
    + : "ir" (i) : "memory");
    return c;
    }

    @@ -141,10 +133,9 @@ static inline long local_add_return(long i, local_t *l)
    #endif
    /* Modern 486+ processor */
    __i = i;
    - __asm__ __volatile__(
    - _ASM_XADD "%0, %1;"
    - :"+r" (i), "+m" (l->a.counter)
    - : : "memory");
    + asm volatile(_ASM_XADD "%0, %1;"
    + : "+r" (i), "+m" (l->a.counter)
    + : : "memory");
    return i + __i;

    #ifdef CONFIG_M386
    @@ -182,11 +173,11 @@ static inline long local_sub_return(long i, local_t *l)
    #define local_add_unless(l, a, u) \
    ({ \
    long c, old; \
    - c = local_read(l); \
    + c = local_read((l)); \
    for (;;) { \
    if (unlikely(c == (u))) \
    break; \
    - old = local_cmpxchg((l), c, c + (a)); \
    + old = local_cmpxchg((l), c, c + (a)); \
    if (likely(old == c)) \
    break; \
    c = old; \
    @@ -214,26 +205,30 @@ static inline long local_sub_return(long i, local_t *l)

    /* Need to disable preemption for the cpu local counters otherwise we could
    still access a variable of a previous CPU in a non atomic way. */
    -#define cpu_local_wrap_v(l) \
    - ({ local_t res__; \
    - preempt_disable(); \
    - res__ = (l); \
    - preempt_enable(); \
    - res__; })
    +#define cpu_local_wrap_v(l) \
    +({ \
    + local_t res__; \
    + preempt_disable(); \
    + res__ = (l); \
    + preempt_enable(); \
    + res__; \
    +})
    #define cpu_local_wrap(l) \
    - ({ preempt_disable(); \
    - l; \
    - preempt_enable(); }) \
    -
    -#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
    -#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
    -#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
    -#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
    -#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
    -#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
    -
    -#define __cpu_local_inc(l) cpu_local_inc(l)
    -#define __cpu_local_dec(l) cpu_local_dec(l)
    +({ \
    + preempt_disable(); \
    + (l); \
    + preempt_enable(); \
    +}) \
    +
    +#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
    +#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
    +#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
    +#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
    +#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
    +#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
    +
    +#define __cpu_local_inc(l) cpu_local_inc((l))
    +#define __cpu_local_dec(l) cpu_local_dec((l))
    #define __cpu_local_add(i, l) cpu_local_add((i), (l))
    #define __cpu_local_sub(i, l) cpu_local_sub((i), (l))

    --
    1.5.4.rc2
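
    For reference, a sketch of the cpu_local_* wrappers in use (hypothetical
    counter name; assumes <linux/percpu.h>). The wrap macros exist only to
    bracket the local_t operation with preempt_disable()/preempt_enable():

        static DEFINE_PER_CPU(local_t, irq_events) = LOCAL_INIT(0);

        static void note_irq_event(void)
        {
                /* expands to preempt_disable();
                 * local_inc(&__get_cpu_var(irq_events)); preempt_enable(); */
                cpu_local_inc(irq_events);
        }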


  16. [PATCH 057/148] include/asm-x86/kexec.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/kexec.h | 71 +++++++++++++++++++++++------------------------
    1 files changed, 35 insertions(+), 36 deletions(-)

    diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
    index c90d3c7..8f855a1 100644
    --- a/include/asm-x86/kexec.h
    +++ b/include/asm-x86/kexec.h
    @@ -94,10 +94,9 @@ static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
    {
    #ifdef CONFIG_X86_32
    newregs->sp = (unsigned long)&(oldregs->sp);
    - __asm__ __volatile__(
    - "xorl %%eax, %%eax\n\t"
    - "movw %%ss, %%ax\n\t"
    - :"=a"(newregs->ss));
    + asm volatile("xorl %%eax, %%eax\n\t"
    + "movw %%ss, %%ax\n\t"
    + :"=a"(newregs->ss));
    #endif
    }

    @@ -114,39 +113,39 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
    crash_fixup_ss_esp(newregs, oldregs);
    } else {
    #ifdef CONFIG_X86_32
    - __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->bx));
    - __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->cx));
    - __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->dx));
    - __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->si));
    - __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->di));
    - __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->bp));
    - __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->ax));
    - __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->sp));
    - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
    - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
    - __asm__ __volatile__("movl %%ds, %%eax;" :"=a"(newregs->ds));
    - __asm__ __volatile__("movl %%es, %%eax;" :"=a"(newregs->es));
    - __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->flags));
    + asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
    + asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
    + asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
    + asm volatile("movl %%esi,%0" : "=m"(newregs->si));
    + asm volatile("movl %%edi,%0" : "=m"(newregs->di));
    + asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
    + asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
    + asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
    + asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
    + asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
    + asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
    + asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
    + asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
    #else
    - __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->bx));
    - __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->cx));
    - __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->dx));
    - __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->si));
    - __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->di));
    - __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->bp));
    - __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->ax));
    - __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->sp));
    - __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
    - __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
    - __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
    - __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
    - __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
    - __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
    - __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
    - __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
    - __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
    - __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
    - __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->flags));
    + asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
    + asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
    + asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
    + asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
    + asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
    + asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
    + asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
    + asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
    + asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
    + asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
    + asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
    + asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
    + asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
    + asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
    + asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
    + asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
    + asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
    + asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
    + asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
    #endif
    newregs->ip = (unsigned long)current_text_addr();
    }
    --
    1.5.4.rc2
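
    For reference, crash_setup_regs() above repeats one idiom throughout; a
    stripped-down sketch of it (hypothetical helper, 32-bit case):

        static inline unsigned long capture_esp(void)
        {
                unsigned long sp;

                /* one volatile asm per register, so the compiler cannot
                 * reorder or coalesce the snapshot reads */
                asm volatile("movl %%esp, %0" : "=m" (sp));
                return sp;
        }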


  17. [PATCH 066/148] include/asm-x86/mca_dma.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/mca_dma.h | 34 +++++++++++++++++-----------------
    1 files changed, 17 insertions(+), 17 deletions(-)

    diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
    index fbb1f3b..c3dca6e 100644
    --- a/include/asm-x86/mca_dma.h
    +++ b/include/asm-x86/mca_dma.h
    @@ -12,18 +12,18 @@
    * count by 2 when using 16-bit dma; that is not handled by these functions.
    *
    * Ramen Noodles are yummy.
    - *
    - * 1998 Tymm Twillman
    + *
    + * 1998 Tymm Twillman
    */

    /*
    - * Registers that are used by the DMA controller; FN is the function register
    + * Registers that are used by the DMA controller; FN is the function register
    * (tell the controller what to do) and EXE is the execution register (how
    * to do it)
    */

    #define MCA_DMA_REG_FN 0x18
    -#define MCA_DMA_REG_EXE 0x1A
    +#define MCA_DMA_REG_EXE 0x1A

    /*
    * Functions that the DMA controller can do
    @@ -43,9 +43,9 @@

    /*
    * Modes (used by setting MCA_DMA_FN_MODE in the function register)
    - *
    + *
    * Note that the MODE_READ is read from memory (write to device), and
    - * MODE_WRITE is vice-versa.
    + * MODE_WRITE is vice-versa.
    */

    #define MCA_DMA_MODE_XFER 0x04 /* read by default */
    @@ -63,7 +63,7 @@
    * IRQ context.
    */

    -static __inline__ void mca_enable_dma(unsigned int dmanr)
    +static inline void mca_enable_dma(unsigned int dmanr)
    {
    outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN);
    }
    @@ -76,7 +76,7 @@ static __inline__ void mca_enable_dma(unsigned int dmanr)
    * IRQ context.
    */

    -static __inline__ void mca_disable_dma(unsigned int dmanr)
    +static inline void mca_disable_dma(unsigned int dmanr)
    {
    outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN);
    }
    @@ -87,10 +87,10 @@ static __inline__ void mca_disable_dma(unsigned int dmanr)
    * @a: 24bit bus address
    *
    * Load the address register in the DMA controller. This has a 24bit
    - * limitation (16Mb).
    + * limitation (16Mb).
    */

    -static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
    +static inline void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
    {
    outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN);
    outb(a & 0xff, MCA_DMA_REG_EXE);
    @@ -106,14 +106,14 @@ static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a)
    * limitation (16Mb). The return is a bus address.
    */

    -static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
    +static inline unsigned int mca_get_dma_addr(unsigned int dmanr)
    {
    unsigned int addr;

    outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN);
    addr = inb(MCA_DMA_REG_EXE);
    addr |= inb(MCA_DMA_REG_EXE) << 8;
    - addr |= inb(MCA_DMA_REG_EXE) << 16;
    + addr |= inb(MCA_DMA_REG_EXE) << 16;

    return addr;
    }
    @@ -127,7 +127,7 @@ static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr)
    * Setting a count of zero will not do what you expect.
    */

    -static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
    +static inline void mca_set_dma_count(unsigned int dmanr, unsigned int count)
    {
    count--; /* transfers one more than count -- correct for this */

    @@ -144,7 +144,7 @@ static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count)
    * on this DMA channel.
    */

    -static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
    +static inline unsigned int mca_get_dma_residue(unsigned int dmanr)
    {
    unsigned short count;

    @@ -164,12 +164,12 @@ static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr)
    * with an I/O port target.
    */

    -static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
    +static inline void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
    {
    /*
    * DMA from a port address -- set the io address
    */
    -
    +
    outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN);
    outb(io_addr & 0xff, MCA_DMA_REG_EXE);
    outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE);
    @@ -192,7 +192,7 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
    * %MCA_DMA_MODE_16 to do 16bit transfers.
    */

    -static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
    +static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
    {
    outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN);
    outb(mode, MCA_DMA_REG_EXE);
    --
    1.5.4.rc2
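
    For reference, a typical programming sequence with these helpers (a
    sketch only: channel, port, and buffer are hypothetical, the count is
    halved per the 16-bit note at the top of the file, and isa_virt_to_bus
    from <asm/io.h> is assumed):

        static void setup_outbound_dma(void *buf, unsigned int len)
        {
                unsigned int dmanr = 5;         /* hypothetical channel */

                mca_disable_dma(dmanr);         /* mask while reprogramming */
                mca_set_dma_io(dmanr, 0x300);   /* hypothetical device port */
                mca_set_dma_addr(dmanr, isa_virt_to_bus(buf));
                mca_set_dma_count(dmanr, len / 2);      /* 16-bit transfers */
                /* memory -> device is the controller's "read" direction */
                mca_set_dma_mode(dmanr, MCA_DMA_MODE_XFER | MCA_DMA_MODE_IO |
                                        MCA_DMA_MODE_16);
                mca_enable_dma(dmanr);
        }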


  18. [PATCH 061/148] include/asm-x86/lguest_hcall.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/lguest_hcall.h | 5 ++---
    1 files changed, 2 insertions(+), 3 deletions(-)

    diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
    index 758b9a5..743d888 100644
    --- a/include/asm-x86/lguest_hcall.h
    +++ b/include/asm-x86/lguest_hcall.h
    @@ -46,7 +46,7 @@ hcall(unsigned long call,
    {
    /* "int" is the Intel instruction to trigger a trap. */
    asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
    - /* The call in %eax (aka "a") might be overwritten */
    + /* The call in %eax (aka "a") might be overwritten */
    : "=a"(call)
    /* The arguments are in %eax, %edx, %ebx & %ecx */
    : "a"(call), "d"(arg1), "b"(arg2), "c"(arg3)
    @@ -62,8 +62,7 @@ hcall(unsigned long call,
    #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)

    #define LHCALL_RING_SIZE 64
    -struct hcall_args
    -{
    +struct hcall_args {
    /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */
    unsigned long arg0, arg2, arg3, arg1;
    };
    --
    1.5.4.rc2
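
    For reference, a sketch of issuing one of these hypercalls from the
    guest; LHCALL_FLUSH_TLB is among the call numbers defined earlier in
    this header, and the argument values are illustrative:

        /* The call number travels in %eax, arguments in %edx, %ebx, %ecx,
         * then "int $LGUEST_TRAP_ENTRY" traps to the Host. */
        hcall(LHCALL_FLUSH_TLB, 1, 0, 0);       /* nonzero arg1: full flush */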


  19. [PATCH 084/148] include/asm-x86/param.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/param.h | 4 ++--
    1 files changed, 2 insertions(+), 2 deletions(-)

    diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
    index c996ec4..6f0d042 100644
    --- a/include/asm-x86/param.h
    +++ b/include/asm-x86/param.h
    @@ -3,8 +3,8 @@

    #ifdef __KERNEL__
    # define HZ CONFIG_HZ /* Internal kernel timer frequency */
    -# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
    -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
    +# define USER_HZ 100 /* some user interfaces are */
    +# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
    #endif

    #ifndef HZ
    --
    1.5.4.rc2
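
    For reference, the HZ/USER_HZ split matters wherever tick counts cross
    into user space; a sketch of the usual conversion, assuming the standard
    helper from <linux/jiffies.h>:

        /* jiffies advance at HZ internally, but times(2) and friends report
         * clock_t in USER_HZ units, so rescale before copying out. */
        clock_t user_ticks = jiffies_to_clock_t(jiffies);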


  20. [PATCH 070/148] include/asm-x86/mmx.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/mmx.h | 2 +-
    1 files changed, 1 insertions(+), 1 deletions(-)

    diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
    index 46b71da..9408812 100644
    --- a/include/asm-x86/mmx.h
    +++ b/include/asm-x86/mmx.h
    @@ -6,7 +6,7 @@
    */

    #include <linux/types.h>
    -
    +
    extern void *_mmx_memcpy(void *to, const void *from, size_t size);
    extern void mmx_clear_page(void *page);
    extern void mmx_copy_page(void *to, void *from);
    --
    1.5.4.rc2
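
    For reference, a usage sketch for the declarations above (buffers are
    hypothetical; the helpers manage FPU state internally via
    kernel_fpu_begin/end):

        static char page_buf[4096] __attribute__((aligned(4096)));

        static void mmx_demo(const void *src)
        {
                _mmx_memcpy(page_buf, src, sizeof(page_buf));   /* bulk copy */
                mmx_clear_page(page_buf);       /* zero a 4K page via MMX */
        }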

