[PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only


Thread: [PATCH 0/148] include/asm-x86: checkpatch cleanups - formatting only

  1. [PATCH 083/148] include/asm-x86/page_64.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/page_64.h | 4 ++--
    1 files changed, 2 insertions(+), 2 deletions(-)

    diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
    index aee05c6..f156778 100644
    --- a/include/asm-x86/page_64.h
    +++ b/include/asm-x86/page_64.h
    @@ -5,7 +5,7 @@

    #define THREAD_ORDER 1
    #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
    -#define CURRENT_MASK (~(THREAD_SIZE-1))
    +#define CURRENT_MASK (~(THREAD_SIZE - 1))

    #define EXCEPTION_STACK_ORDER 0
    #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
    @@ -51,7 +51,7 @@
    * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
    * arch/x86/kernel/head_64.S), and it is mapped here:
    */
    -#define KERNEL_IMAGE_SIZE (512*1024*1024)
    +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
    #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)

    #ifndef __ASSEMBLY__
    --
    1.5.4.rc2

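    The mask defined in this hunk is the usual power-of-two alignment trick: because THREAD_SIZE is a power of two, masking a stack address with ~(THREAD_SIZE - 1) rounds it down to the base of the current thread's stack. A minimal user-space sketch of the same arithmetic, with hypothetical values rather than the kernel's own definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical 8 KiB stack, mirroring THREAD_SIZE = PAGE_SIZE << THREAD_ORDER. */
    #define EX_THREAD_SIZE   (4096UL << 1)
    #define EX_CURRENT_MASK  (~(EX_THREAD_SIZE - 1))   /* clears the low 13 bits */

    int main(void)
    {
            uintptr_t sp = 0xc12345f8;      /* some address inside the stack */

            printf("stack base: %#lx\n", (unsigned long)(sp & EX_CURRENT_MASK));
            return 0;
    }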

  2. [PATCH 081/148] include/asm-x86/numaq.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/numaq.h | 9 +++++----
    1 files changed, 5 insertions(+), 4 deletions(-)

    diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
    index 38f710d..94b86c3 100644
    --- a/include/asm-x86/numaq.h
    +++ b/include/asm-x86/numaq.h
    @@ -3,7 +3,7 @@
    *
    * Copyright (C) 2002, IBM Corp.
    *
    - * All rights reserved.
    + * All rights reserved.
    *
    * This program is free software; you can redistribute it and/or modify
    * it under the terms of the GNU General Public License as published by
    @@ -33,7 +33,8 @@ extern int get_memcfg_numaq(void);
    /*
    * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
    */
    -#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */
    +#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private
    + quad space */

    /*
    * Communication area for each processor on lynxer-processor tests.
    @@ -139,7 +140,7 @@ struct sys_cfg_data {
    unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */
    unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */
    /* may not be totally populated */
    - unsigned int split_mem_enbl; /* 0 for no low shared memory */
    + unsigned int split_mem_enbl; /* 0 for no low shared memory */
    unsigned int mmio_sz; /* Size of total system memory mapped I/O */
    /* (in MB). */
    unsigned int quad_spin_lock; /* Spare location used for quad */
    @@ -152,7 +153,7 @@ struct sys_cfg_data {
    /*
    * memory configuration area for each quad
    */
    - struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
    + struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */
    };

    static inline unsigned long *get_zholes_size(int nid)
    --
    1.5.4.rc2


  3. [PATCH 067/148] include/asm-x86/mmu_context_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/mmu_context_32.h | 12 ++++++------
    1 files changed, 6 insertions(+), 6 deletions(-)

    diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
    index 8198d1c..9756ae0 100644
    --- a/include/asm-x86/mmu_context_32.h
    +++ b/include/asm-x86/mmu_context_32.h
    @@ -62,7 +62,7 @@ static inline void switch_mm(struct mm_struct *prev,
    BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

    if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
    - /* We were in lazy tlb mode and leave_mm disabled
    + /* We were in lazy tlb mode and leave_mm disabled
    * tlb flush IPI delivery. We must reload %cr3.
    */
    load_cr3(next->pgd);
    @@ -75,10 +75,10 @@ static inline void switch_mm(struct mm_struct *prev,
    #define deactivate_mm(tsk, mm) \
    asm("movl %0,%%gs": :"r" (0));

    -#define activate_mm(prev, next) \
    - do { \
    - paravirt_activate_mm(prev, next); \
    - switch_mm((prev),(next),NULL); \
    - } while(0);
    +#define activate_mm(prev, next) \
    +do { \
    + paravirt_activate_mm((prev), (next)); \
    + switch_mm((prev), (next), NULL); \
    +} while (0);

    #endif
    --
    1.5.4.rc2

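    The rewritten activate_mm() follows the standard multi-statement macro idiom: wrapping the body in do { ... } while (0) makes the macro expand to a single statement, so it nests safely under if/else, and the added parentheses around prev and next guard against operator-precedence surprises in the arguments. A generic sketch of the idiom, with hypothetical names rather than the kernel's:

    /* Without the do/while wrapper, the second statement would escape the if. */
    #define SWAP_INTS(a, b)                 \
    do {                                    \
            int __tmp = (a);                \
            (a) = (b);                      \
            (b) = __tmp;                    \
    } while (0)

    static void maybe_swap(int *x, int *y, int cond)
    {
            if (cond)
                    SWAP_INTS(*x, *y);      /* expands to one statement */
            else
                    *x = *y;
    }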

  4. [PATCH 122/148] include/asm-x86/swiotlb.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/swiotlb.h | 28 ++++++++++++++--------------
    1 files changed, 14 insertions(+), 14 deletions(-)

    diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
    index f9c5895..f5d9e74 100644
    --- a/include/asm-x86/swiotlb.h
    +++ b/include/asm-x86/swiotlb.h
    @@ -8,15 +8,15 @@
    extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
    size_t size, int dir);
    extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
    - dma_addr_t *dma_handle, gfp_t flags);
    + dma_addr_t *dma_handle, gfp_t flags);
    extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
    - size_t size, int dir);
    + size_t size, int dir);
    extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
    - dma_addr_t dev_addr,
    - size_t size, int dir);
    + dma_addr_t dev_addr,
    + size_t size, int dir);
    extern void swiotlb_sync_single_for_device(struct device *hwdev,
    - dma_addr_t dev_addr,
    - size_t size, int dir);
    + dma_addr_t dev_addr,
    + size_t size, int dir);
    extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
    dma_addr_t dev_addr,
    unsigned long offset,
    @@ -26,18 +26,18 @@ extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
    unsigned long offset,
    size_t size, int dir);
    extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
    - struct scatterlist *sg, int nelems,
    - int dir);
    + struct scatterlist *sg, int nelems,
    + int dir);
    extern void swiotlb_sync_sg_for_device(struct device *hwdev,
    - struct scatterlist *sg, int nelems,
    - int dir);
    + struct scatterlist *sg, int nelems,
    + int dir);
    extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
    - int nents, int direction);
    + int nents, int direction);
    extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
    - int nents, int direction);
    + int nents, int direction);
    extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
    -extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
    - void *vaddr, dma_addr_t dma_handle);
    +extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
    + void *vaddr, dma_addr_t dma_handle);
    extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
    extern void swiotlb_init(void);

    --
    1.5.4.rc2


  5. [PATCH 127/148] include/asm-x86/thread_info_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/thread_info_32.h | 88 ++++++++++++++++++++------------------
    1 files changed, 46 insertions(+), 42 deletions(-)

    diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
    index 54140ce..5318599 100644
    --- a/include/asm-x86/thread_info_32.h
    +++ b/include/asm-x86/thread_info_32.h
    @@ -20,7 +20,8 @@
    * low level task data that entry.S needs immediate access to
    * - this struct should fit entirely inside of one cache line
    * - this struct shares the supervisor stack pages
    - * - if the contents of this structure are changed, the assembly constants must also be changed
    + * - if the contents of this structure are changed,
    + * the assembly constants must also be changed
    */
    #ifndef __ASSEMBLY__

    @@ -30,18 +31,16 @@ struct thread_info {
    unsigned long flags; /* low level flags */
    unsigned long status; /* thread-synchronous flags */
    __u32 cpu; /* current CPU */
    - int preempt_count; /* 0 => preemptable, <0 => BUG */
    -
    -
    + int preempt_count; /* 0 => preemptable,
    + <0 => BUG */
    mm_segment_t addr_limit; /* thread address space:
    - 0-0xBFFFFFFF for user-thead
    - 0-0xFFFFFFFF for kernel-thread
    + 0-0xBFFFFFFF user-thread
    + 0-0xFFFFFFFF kernel-thread
    */
    void *sysenter_return;
    struct restart_block restart_block;
    -
    - unsigned long previous_esp; /* ESP of the previous stack in case
    - of nested (IRQ) stacks
    + unsigned long previous_esp; /* ESP of the previous stack in
    + case of nested (IRQ) stacks
    */
    __u8 supervisor_stack[0];
    };
    @@ -90,22 +89,23 @@ register unsigned long current_stack_pointer asm("esp") __used;
    /* how to get the thread information struct from C */
    static inline struct thread_info *current_thread_info(void)
    {
    - return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
    + return (struct thread_info *)
    + (current_stack_pointer & ~(THREAD_SIZE - 1));
    }

    /* thread information allocation */
    #ifdef CONFIG_DEBUG_STACK_USAGE
    -#define alloc_thread_info(tsk) ((struct thread_info *) \
    - __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE)))
    +#define alloc_thread_info(tsk) ((struct thread_info *) \
    + __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
    #else
    -#define alloc_thread_info(tsk) ((struct thread_info *) \
    +#define alloc_thread_info(tsk) ((struct thread_info *) \
    __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
    #endif

    #else /* !__ASSEMBLY__ */

    /* how to get the thread information struct from ASM */
    -#define GET_THREAD_INFO(reg) \
    +#define GET_THREAD_INFO(reg) \
    movl $-THREAD_SIZE, reg; \
    andl %esp, reg

    @@ -117,14 +117,16 @@ static inline struct thread_info *current_thread_info(void)

    /*
    * thread information flags
    - * - these are process state flags that various assembly files may need to access
    + * - these are process state flags that various
    + * assembly files may need to access
    * - pending work-to-be-done flags are in LSW
    * - other flags in MSW
    */
    #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
    #define TIF_SIGPENDING 1 /* signal pending */
    #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
    -#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
    +#define TIF_SINGLESTEP 3 /* restore singlestep on return to
    + user mode */
    #define TIF_IRET 4 /* return with iret */
    #define TIF_SYSCALL_EMU 5 /* syscall emulation active */
    #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
    @@ -141,36 +143,36 @@ static inline struct thread_info *current_thread_info(void)
    #define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */
    #define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */

    -#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
    -#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
    -#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
    -#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
    -#define _TIF_IRET (1<<TIF_IRET)
    -#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
    -#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
    -#define _TIF_SECCOMP (1<<TIF_SECCOMP)
    -#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
    -#define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED)
    -#define _TIF_DEBUG (1<<TIF_DEBUG)
    -#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
    -#define _TIF_FREEZE (1<<TIF_FREEZE)
    -#define _TIF_NOTSC (1<<TIF_NOTSC)
    -#define _TIF_FORCED_TF (1<<TIF_FORCED_TF)
    -#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR)
    -#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR)
    -#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS)
    +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
    +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
    +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
    +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
    +#define _TIF_IRET (1 << TIF_IRET)
    +#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
    +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
    +#define _TIF_SECCOMP (1 << TIF_SECCOMP)
    +#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
    +#define _TIF_HRTICK_RESCHED (1 << TIF_HRTICK_RESCHED)
    +#define _TIF_DEBUG (1 << TIF_DEBUG)
    +#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
    +#define _TIF_FREEZE (1 << TIF_FREEZE)
    +#define _TIF_NOTSC (1 << TIF_NOTSC)
    +#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
    +#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
    +#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
    +#define _TIF_BTS_TRACE_TS (1 << TIF_BTS_TRACE_TS)

    /* work to do on interrupt/exception return */
    -#define _TIF_WORK_MASK \
    - (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
    - _TIF_SECCOMP | _TIF_SYSCALL_EMU))
    +#define _TIF_WORK_MASK \
    + (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
    + _TIF_SECCOMP | _TIF_SYSCALL_EMU))
    /* work to do on any return to u-space */
    #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)

    /* flags to check in __switch_to() */
    -#define _TIF_WORK_CTXSW \
    - (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
    - _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
    +#define _TIF_WORK_CTXSW \
    + (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
    + _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
    #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
    #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)

    @@ -182,8 +184,10 @@ static inline struct thread_info *current_thread_info(void)
    * ever touches our thread-synchronous status, so we don't
    * have to worry about atomic accesses.
    */
    -#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
    -#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
    +#define TS_USEDFPU 0x0001 /* FPU was used by this task
    + this quantum (SMP) */
    +#define TS_POLLING 0x0002 /* True if in idle loop
    + and not sleeping */

    #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

    --
    1.5.4.rc2

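    The _TIF_* values in this hunk are single-bit masks derived from the TIF_* bit numbers (1 << TIF_x), so composite masks such as _TIF_WORK_MASK are simply ORs of those bits with a few excluded. A standalone sketch of the pattern, using made-up flag names rather than the kernel's:

    #include <stdio.h>

    /* bit numbers, in the style of TIF_* */
    enum { EX_SIGPENDING = 1, EX_NEED_RESCHED = 2, EX_SECCOMP = 8 };

    /* single-bit masks, in the style of _TIF_* */
    #define EX_MASK_SIGPENDING      (1 << EX_SIGPENDING)
    #define EX_MASK_NEED_RESCHED    (1 << EX_NEED_RESCHED)
    #define EX_MASK_SECCOMP         (1 << EX_SECCOMP)

    /* "work" mask: low 16 bits minus the flags handled by other paths */
    #define EX_WORK_MASK            (0x0000FFFF & ~EX_MASK_SECCOMP)

    int main(void)
    {
            unsigned long flags = EX_MASK_SIGPENDING | EX_MASK_SECCOMP;

            printf("pending work? %s\n", (flags & EX_WORK_MASK) ? "yes" : "no");
            return 0;
    }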

  6. [PATCH 111/148] include/asm-x86/sigcontext32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/sigcontext32.h | 4 ++--
    1 files changed, 2 insertions(+), 2 deletions(-)

    diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
    index 6ffab4f..57a9686 100644
    --- a/include/asm-x86/sigcontext32.h
    +++ b/include/asm-x86/sigcontext32.h
    @@ -26,7 +26,7 @@ struct _fpstate_ia32 {
    __u32 cw;
    __u32 sw;
    __u32 tag; /* not compatible to 64bit twd */
    - __u32 ipoff;
    + __u32 ipoff;
    __u32 cssel;
    __u32 dataoff;
    __u32 datasel;
    @@ -39,7 +39,7 @@ struct _fpstate_ia32 {
    __u32 mxcsr;
    __u32 reserved;
    struct _fpxreg _fxsr_st[8];
    - struct _xmmreg _xmm[8]; /* It's actually 16 */
    + struct _xmmreg _xmm[8]; /* It's actually 16 */
    __u32 padding[56];
    };

    --
    1.5.4.rc2


  7. [PATCH 123/148] include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/sync_bitops.h | 56 ++++++++++++++++++++--------------------
    1 files changed, 28 insertions(+), 28 deletions(-)

    diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
    index bc249f4..f1078a5 100644
    --- a/include/asm-x86/sync_bitops.h
    +++ b/include/asm-x86/sync_bitops.h
    @@ -13,7 +13,7 @@
    * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
    */

    -#define ADDR (*(volatile long *) addr)
    +#define ADDR (*(volatile long *)addr)

    /**
    * sync_set_bit - Atomically set a bit in memory
    @@ -26,12 +26,12 @@
    * Note that @nr may be almost arbitrarily large; this function is not
    * restricted to acting on a single-word quantity.
    */
    -static inline void sync_set_bit(int nr, volatile unsigned long * addr)
    +static inline void sync_set_bit(int nr, volatile unsigned long *addr)
    {
    - __asm__ __volatile__("lock; btsl %1,%0"
    - :"+m" (ADDR)
    - :"Ir" (nr)
    - : "memory");
    + asm volatile("lock; btsl %1,%0"
    + : "+m" (ADDR)
    + : "Ir" (nr)
    + : "memory");
    }

    /**
    @@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
    * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
    * in order to ensure changes are visible on other processors.
    */
    -static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
    +static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
    {
    - __asm__ __volatile__("lock; btrl %1,%0"
    - :"+m" (ADDR)
    - :"Ir" (nr)
    - : "memory");
    + asm volatile("lock; btrl %1,%0"
    + : "+m" (ADDR)
    + : "Ir" (nr)
    + : "memory");
    }

    /**
    @@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
    * Note that @nr may be almost arbitrarily large; this function is not
    * restricted to acting on a single-word quantity.
    */
    -static inline void sync_change_bit(int nr, volatile unsigned long * addr)
    +static inline void sync_change_bit(int nr, volatile unsigned long *addr)
    {
    - __asm__ __volatile__("lock; btcl %1,%0"
    - :"+m" (ADDR)
    - :"Ir" (nr)
    - : "memory");
    + asm volatile("lock; btcl %1,%0"
    + : "+m" (ADDR)
    + : "Ir" (nr)
    + : "memory");
    }

    /**
    @@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
    * This operation is atomic and cannot be reordered.
    * It also implies a memory barrier.
    */
    -static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
    +static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
    {
    int oldbit;

    - __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
    - :"=r" (oldbit),"+m" (ADDR)
    - :"Ir" (nr) : "memory");
    + asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
    + : "=r" (oldbit), "+m" (ADDR)
    + : "Ir" (nr) : "memory");
    return oldbit;
    }

    @@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
    * This operation is atomic and cannot be reordered.
    * It also implies a memory barrier.
    */
    -static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
    +static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
    {
    int oldbit;

    - __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
    - :"=r" (oldbit),"+m" (ADDR)
    - :"Ir" (nr) : "memory");
    + asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
    + : "=r" (oldbit), "+m" (ADDR)
    + : "Ir" (nr) : "memory");
    return oldbit;
    }

    @@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
    * This operation is atomic and cannot be reordered.
    * It also implies a memory barrier.
    */
    -static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
    +static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
    {
    int oldbit;

    - __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
    - :"=r" (oldbit),"+m" (ADDR)
    - :"Ir" (nr) : "memory");
    + asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
    + : "=r" (oldbit), "+m" (ADDR)
    + : "Ir" (nr) : "memory");
    return oldbit;
    }

    --
    1.5.4.rc2

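    The substantive change here is a spelling one: gcc accepts both __asm__ __volatile__ and asm volatile (the underscored forms exist for strict-ISO code), and kernel style prefers the shorter keywords plus a space after each ':' separator. A condensed, x86-only sketch in the new style, mirroring sync_set_bit() from this patch:

    /* Atomically set bit 'nr' in *addr; x86 only (gcc/clang inline asm). */
    static inline void example_set_bit(int nr, volatile unsigned long *addr)
    {
            asm volatile("lock; btsl %1,%0"
                         : "+m" (*addr)
                         : "Ir" (nr)
                         : "memory");
    }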

  8. [PATCH 118/148] include/asm-x86/string_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/string_32.h | 141 +++++++++++++++++++++----------------------
    1 files changed, 70 insertions(+), 71 deletions(-)

    diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
    index 32afb63..c18e533 100644
    --- a/include/asm-x86/string_32.h
    +++ b/include/asm-x86/string_32.h
    @@ -3,7 +3,7 @@

    #ifdef __KERNEL__

    -/* Let gcc decide wether to inline or use the out of line functions */
    +/* Let gcc decide whether to inline or use the out of line functions */

    #define __HAVE_ARCH_STRCPY
    extern char *strcpy(char *dest, const char *src);
    @@ -32,16 +32,15 @@ extern size_t strlen(const char *s);
    static __always_inline void *__memcpy(void *to, const void *from, size_t n)
    {
    int d0, d1, d2;
    - __asm__ __volatile__(
    - "rep ; movsl\n\t"
    - "movl %4,%%ecx\n\t"
    - "andl $3,%%ecx\n\t"
    - "jz 1f\n\t"
    - "rep ; movsb\n\t"
    - "1:"
    - : "=&c" (d0), "=&D" (d1), "=&S" (d2)
    - : "0" (n/4), "g" (n), "1" ((long)to), "2" ((long)from)
    - : "memory");
    + asm volatile("rep ; movsl\n\t"
    + "movl %4,%%ecx\n\t"
    + "andl $3,%%ecx\n\t"
    + "jz 1f\n\t"
    + "rep ; movsb\n\t"
    + "1:"
    + : "=&c" (d0), "=&D" (d1), "=&S" (d2)
    + : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
    + : "memory");
    return to;
    }

    @@ -74,10 +73,12 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, size_t n)
    *(int *)to = *(int *)from;
    *((char *)to + 4) = *((char *)from + 4);
    return to;
    - case 6: *(int *)to = *(int *)from;
    + case 6:
    + *(int *)to = *(int *)from;
    *((short *)to + 2) = *((short *)from + 2);
    return to;
    - case 8: *(int *)to = *(int *)from;
    + case 8:
    + *(int *)to = *(int *)from;
    *((int *)to + 1) = *((int *)from + 1);
    return to;
    #endif
    @@ -88,54 +89,55 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, size_
    if (n >= 5 * 4) {
    /* large block: use rep prefix */
    int ecx;
    - __asm__ __volatile__(
    - "rep ; movsl"
    - : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
    - : "0" (n/4), "1" (edi), "2" (esi)
    - : "memory"
    + asm volatile("rep ; movsl"
    + : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
    + : "0" (n / 4), "1" (edi), "2" (esi)
    + : "memory"
    );
    } else {
    /* small block: don't clobber ecx + smaller code */
    if (n >= 4 * 4)
    - __asm__ __volatile__("movsl"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + asm volatile("movsl"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    if (n >= 3 * 4)
    - __asm__ __volatile__("movsl"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + asm volatile("movsl"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    if (n >= 2 * 4)
    - __asm__ __volatile__("movsl"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + asm volatile("movsl"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    if (n >= 1 * 4)
    - __asm__ __volatile__("movsl"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + asm volatile("movsl"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    }
    switch (n % 4) {
    /* tail */
    case 0:
    return to;
    case 1:
    - __asm__ __volatile__("movsb"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + asm volatile("movsb"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    return to;
    - case 2: __asm__ __volatile__("movsw"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + case 2:
    + asm volatile("movsw"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    return to;
    - default: __asm__ __volatile__("movsw\n\tmovsb"
    - : "=&D"(edi), "=&S"(esi)
    - : "0"(edi), "1"(esi)
    - : "memory");
    + default:
    + asm volatile("movsw\n\tmovsb"
    + : "=&D"(edi), "=&S"(esi)
    + : "0"(edi), "1"(esi)
    + : "memory");
    return to;
    }
    }
    @@ -193,12 +195,11 @@ extern void *memchr(const void *cs, int c, size_t count);
    static inline void *__memset_generic(void *s, char c, size_t count)
    {
    int d0, d1;
    - __asm__ __volatile__(
    - "rep\n\t"
    - "stosb"
    - : "=&c" (d0), "=&D" (d1)
    - : "a" (c), "1" (s), "0" (count)
    - : "memory");
    + asm volatile("rep\n\t"
    + "stosb"
    + : "=&c" (d0), "=&D" (d1)
    + : "a" (c), "1" (s), "0" (count)
    + : "memory");
    return s;
    }

    @@ -213,18 +214,17 @@ static inline void *__memset_generic(void *s, char c, size_t count)
    static __always_inline void *__constant_c_memset(void *s, unsigned long c, size_t count)
    {
    int d0, d1;
    - __asm__ __volatile__(
    - "rep ; stosl\n\t"
    - "testb $2,%b3\n\t"
    - "je 1f\n\t"
    - "stosw\n"
    - "1:\ttestb $1,%b3\n\t"
    - "je 2f\n\t"
    - "stosb\n"
    - "2:"
    - : "=&c" (d0), "=&D" (d1)
    - : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
    - : "memory");
    + asm volatile("rep ; stosl\n\t"
    + "testb $2,%b3\n\t"
    + "je 1f\n\t"
    + "stosw\n"
    + "1:\ttestb $1,%b3\n\t"
    + "je 2f\n\t"
    + "stosb\n"
    + "2:"
    + : "=&c" (d0), "=&D" (d1)
    + : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
    + : "memory");
    return s;
    }

    @@ -260,13 +260,12 @@ static __always_inline void *__constant_c_and_count_memset(void *s, unsigned long pattern, size_t count)
    return s;
    }

    -#define COMMON(x) \
    - __asm__ __volatile__( \
    - "rep ; stosl" \
    - x \
    - : "=&c" (d0), "=&D" (d1) \
    - : "a" (pattern), "0" (count/4), "1" ((long)s) \
    - : "memory")
    +#define COMMON(x) \
    + asm volatile("rep ; stosl" \
    + x \
    + : "=&c" (d0), "=&D" (d1) \
    + : "a" (pattern), "0" (count/4), "1" ((long)s) \
    + : "memory")

    {
    int d0, d1;
    --
    1.5.4.rc2


  9. [PATCH 100/148] include/asm-x86/processor.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/processor.h | 73 ++++++++++++++++++++++--------------------
    1 files changed, 38 insertions(+), 35 deletions(-)

    diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
    index 958106c..7b1e3a8 100644
    --- a/include/asm-x86/processor.h
    +++ b/include/asm-x86/processor.h
    @@ -175,12 +175,12 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
    unsigned int *ecx, unsigned int *edx)
    {
    /* ecx is often an input as well as an output. */
    - __asm__("cpuid"
    - : "=a" (*eax),
    - "=b" (*ebx),
    - "=c" (*ecx),
    - "=d" (*edx)
    - : "0" (*eax), "2" (*ecx));
    + asm("cpuid"
    + : "=a" (*eax),
    + "=b" (*ebx),
    + "=c" (*ecx),
    + "=d" (*edx)
    + : "0" (*eax), "2" (*ecx));
    }

    static inline void load_cr3(pgd_t *pgdir)
    @@ -430,17 +430,23 @@ static inline unsigned long native_get_debugreg(int regno)

    switch (regno) {
    case 0:
    - asm("mov %%db0, %0" :"=r" (val)); break;
    + asm("mov %%db0, %0" :"=r" (val));
    + break;
    case 1:
    - asm("mov %%db1, %0" :"=r" (val)); break;
    + asm("mov %%db1, %0" :"=r" (val));
    + break;
    case 2:
    - asm("mov %%db2, %0" :"=r" (val)); break;
    + asm("mov %%db2, %0" :"=r" (val));
    + break;
    case 3:
    - asm("mov %%db3, %0" :"=r" (val)); break;
    + asm("mov %%db3, %0" :"=r" (val));
    + break;
    case 6:
    - asm("mov %%db6, %0" :"=r" (val)); break;
    + asm("mov %%db6, %0" :"=r" (val));
    + break;
    case 7:
    - asm("mov %%db7, %0" :"=r" (val)); break;
    + asm("mov %%db7, %0" :"=r" (val));
    + break;
    default:
    BUG();
    }
    @@ -481,14 +487,14 @@ static inline void native_set_iopl_mask(unsigned mask)
    #ifdef CONFIG_X86_32
    unsigned int reg;

    - __asm__ __volatile__ ("pushfl;"
    - "popl %0;"
    - "andl %1, %0;"
    - "orl %2, %0;"
    - "pushl %0;"
    - "popfl"
    - : "=&r" (reg)
    - : "i" (~X86_EFLAGS_IOPL), "r" (mask));
    + asm volatile ("pushfl;"
    + "popl %0;"
    + "andl %1, %0;"
    + "orl %2, %0;"
    + "pushl %0;"
    + "popfl"
    + : "=&r" (reg)
    + : "i" (~X86_EFLAGS_IOPL), "r" (mask));
    #endif
    }

    @@ -526,8 +532,8 @@ static inline void native_swapgs(void)
    #define set_debugreg(value, register) \
    native_set_debugreg(register, value)

    -static inline void
    -load_sp0(struct tss_struct *tss, struct thread_struct *thread)
    +static inline void load_sp0(struct tss_struct *tss,
    + struct thread_struct *thread)
    {
    native_load_sp0(tss, thread);
    }
    @@ -683,7 +689,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
    /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
    static inline void rep_nop(void)
    {
    - __asm__ __volatile__("rep; nop" ::: "memory");
    + asm volatile("rep; nop" ::: "memory");
    }

    static inline void cpu_relax(void)
    @@ -697,32 +703,29 @@ static inline void sync_core(void)
    int tmp;

    asm volatile("cpuid" : "=a" (tmp) : "0" (1)
    - : "ebx", "ecx", "edx", "memory");
    + : "ebx", "ecx", "edx", "memory");
    }

    -static inline void
    -__monitor(const void *eax, unsigned long ecx, unsigned long edx)
    +static inline void __monitor(const void *eax, unsigned long ecx,
    + unsigned long edx)
    {
    /* "monitor %eax, %ecx, %edx;" */
    - asm volatile(
    - ".byte 0x0f, 0x01, 0xc8;"
    - :: "a" (eax), "c" (ecx), "d"(edx));
    + asm volatile(".byte 0x0f, 0x01, 0xc8;"
    + :: "a" (eax), "c" (ecx), "d"(edx));
    }

    static inline void __mwait(unsigned long eax, unsigned long ecx)
    {
    /* "mwait %eax, %ecx;" */
    - asm volatile(
    - ".byte 0x0f, 0x01, 0xc9;"
    - :: "a" (eax), "c" (ecx));
    + asm volatile(".byte 0x0f, 0x01, 0xc9;"
    + :: "a" (eax), "c" (ecx));
    }

    static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
    {
    /* "mwait %eax, %ecx;" */
    - asm volatile(
    - "sti; .byte 0x0f, 0x01, 0xc9;"
    - :: "a" (eax), "c" (ecx));
    + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
    + :: "a" (eax), "c" (ecx));
    }

    extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
    --
    1.5.4.rc2

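    The native_cpuid() hunk shows the usual x86 CPUID calling convention: the leaf goes in eax (and a subleaf in ecx), and the results come back in eax/ebx/ecx/edx. A small user-space sketch of the same pattern; leaf 0 returns the vendor string split across ebx, edx and ecx. This is an illustrative snippet (x86-64 assumed, hypothetical helper name), not kernel code:

    #include <stdio.h>
    #include <string.h>

    static void cpuid(unsigned int leaf, unsigned int *a, unsigned int *b,
                      unsigned int *c, unsigned int *d)
    {
            asm("cpuid"
                : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                : "0" (leaf), "2" (0));     /* leaf in eax, subleaf 0 in ecx */
    }

    int main(void)
    {
            unsigned int a, b, c, d;
            char vendor[13];

            cpuid(0, &a, &b, &c, &d);
            memcpy(vendor + 0, &b, 4);      /* e.g. "Genu" */
            memcpy(vendor + 4, &d, 4);      /* e.g. "ineI" */
            memcpy(vendor + 8, &c, 4);      /* e.g. "ntel" */
            vendor[12] = '\0';
            printf("vendor: %s\n", vendor);
            return 0;
    }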

  10. [PATCH 006/148] include/asm-x86/atomic_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/atomic_32.h | 143 ++++++++++++++++++++----------------------
    1 files changed, 68 insertions(+), 75 deletions(-)

    diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
    index 437aac8..21a4825 100644
    --- a/include/asm-x86/atomic_32.h
    +++ b/include/asm-x86/atomic_32.h
    @@ -15,138 +15,133 @@
    * on us. We need to use _exactly_ the address the user gave us,
    * not some alias that contains the same information.
    */
    -typedef struct { int counter; } atomic_t;
    +typedef struct {
    + int counter;
    +} atomic_t;

    #define ATOMIC_INIT(i) { (i) }

    /**
    * atomic_read - read atomic variable
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically reads the value of @v.
    - */
    + */
    #define atomic_read(v) ((v)->counter)

    /**
    * atomic_set - set atomic variable
    * @v: pointer of type atomic_t
    * @i: required value
    - *
    + *
    * Atomically sets the value of @v to @i.
    - */
    -#define atomic_set(v,i) (((v)->counter) = (i))
    + */
    +#define atomic_set(v, i) (((v)->counter) = (i))

    /**
    * atomic_add - add integer to atomic variable
    * @i: integer value to add
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically adds @i to @v.
    */
    -static __inline__ void atomic_add(int i, atomic_t *v)
    +static inline void atomic_add(int i, atomic_t *v)
    {
    - __asm__ __volatile__(
    - LOCK_PREFIX "addl %1,%0"
    - :"+m" (v->counter)
    - :"ir" (i));
    + asm volatile(LOCK_PREFIX "addl %1,%0"
    + : "+m" (v->counter)
    + : "ir" (i));
    }

    /**
    * atomic_sub - subtract integer from atomic variable
    * @i: integer value to subtract
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically subtracts @i from @v.
    */
    -static __inline__ void atomic_sub(int i, atomic_t *v)
    +static inline void atomic_sub(int i, atomic_t *v)
    {
    - __asm__ __volatile__(
    - LOCK_PREFIX "subl %1,%0"
    - :"+m" (v->counter)
    - :"ir" (i));
    + asm volatile(LOCK_PREFIX "subl %1,%0"
    + : "+m" (v->counter)
    + : "ir" (i));
    }

    /**
    * atomic_sub_and_test - subtract value from variable and test result
    * @i: integer value to subtract
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically subtracts @i from @v and returns
    * true if the result is zero, or false for all
    * other cases.
    */
    -static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
    +static inline int atomic_sub_and_test(int i, atomic_t *v)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - LOCK_PREFIX "subl %2,%0; sete %1"
    - :"+m" (v->counter), "=qm" (c)
    - :"ir" (i) : "memory");
    + asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
    + : "+m" (v->counter), "=qm" (c)
    + : "ir" (i) : "memory");
    return c;
    }

    /**
    * atomic_inc - increment atomic variable
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically increments @v by 1.
    - */
    -static __inline__ void atomic_inc(atomic_t *v)
    + */
    +static inline void atomic_inc(atomic_t *v)
    {
    - __asm__ __volatile__(
    - LOCK_PREFIX "incl %0"
    - :"+m" (v->counter));
    + asm volatile(LOCK_PREFIX "incl %0"
    + : "+m" (v->counter));
    }

    /**
    * atomic_dec - decrement atomic variable
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically decrements @v by 1.
    - */
    -static __inline__ void atomic_dec(atomic_t *v)
    + */
    +static inline void atomic_dec(atomic_t *v)
    {
    - __asm__ __volatile__(
    - LOCK_PREFIX "decl %0"
    - :"+m" (v->counter));
    + asm volatile(LOCK_PREFIX "decl %0"
    + : "+m" (v->counter));
    }

    /**
    * atomic_dec_and_test - decrement and test
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically decrements @v by 1 and
    * returns true if the result is 0, or false for all other
    * cases.
    - */
    -static __inline__ int atomic_dec_and_test(atomic_t *v)
    + */
    +static inline int atomic_dec_and_test(atomic_t *v)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - LOCK_PREFIX "decl %0; sete %1"
    - :"+m" (v->counter), "=qm" (c)
    - : : "memory");
    + asm volatile(LOCK_PREFIX "decl %0; sete %1"
    + : "+m" (v->counter), "=qm" (c)
    + : : "memory");
    return c != 0;
    }

    /**
    - * atomic_inc_and_test - increment and test
    + * atomic_inc_and_test - increment and test
    * @v: pointer of type atomic_t
    - *
    + *
    * Atomically increments @v by 1
    * and returns true if the result is zero, or false for all
    * other cases.
    - */
    -static __inline__ int atomic_inc_and_test(atomic_t *v)
    + */
    +static inline int atomic_inc_and_test(atomic_t *v)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - LOCK_PREFIX "incl %0; sete %1"
    - :"+m" (v->counter), "=qm" (c)
    - : : "memory");
    + asm volatile(LOCK_PREFIX "incl %0; sete %1"
    + : "+m" (v->counter), "=qm" (c)
    + : : "memory");
    return c != 0;
    }

    @@ -154,19 +149,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
    * atomic_add_negative - add and test if negative
    * @v: pointer of type atomic_t
    * @i: integer value to add
    - *
    + *
    * Atomically adds @i to @v and returns true
    * if the result is negative, or false when
    * result is greater than or equal to zero.
    - */
    -static __inline__ int atomic_add_negative(int i, atomic_t *v)
    + */
    +static inline int atomic_add_negative(int i, atomic_t *v)
    {
    unsigned char c;

    - __asm__ __volatile__(
    - LOCK_PREFIX "addl %2,%0; sets %1"
    - :"+m" (v->counter), "=qm" (c)
    - :"ir" (i) : "memory");
    + asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
    + : "+m" (v->counter), "=qm" (c)
    + : "ir" (i) : "memory");
    return c;
    }

    @@ -177,20 +171,19 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
    *
    * Atomically adds @i to @v and returns @i + @v
    */
    -static __inline__ int atomic_add_return(int i, atomic_t *v)
    +static inline int atomic_add_return(int i, atomic_t *v)
    {
    int __i;
    #ifdef CONFIG_M386
    unsigned long flags;
    - if(unlikely(boot_cpu_data.x86 <= 3))
    + if (unlikely(boot_cpu_data.x86 <= 3))
    goto no_xadd;
    #endif
    /* Modern 486+ processor */
    __i = i;
    - __asm__ __volatile__(
    - LOCK_PREFIX "xaddl %0, %1"
    - :"+r" (i), "+m" (v->counter)
    - : : "memory");
    + asm volatile(LOCK_PREFIX "xaddl %0, %1"
    + : "+r" (i), "+m" (v->counter)
    + : : "memory");
    return i + __i;

    #ifdef CONFIG_M386
    @@ -210,9 +203,9 @@ no_xadd: /* Legacy 386 processor */
    *
    * Atomically subtracts @i from @v and returns @v - @i
    */
    -static __inline__ int atomic_sub_return(int i, atomic_t *v)
    +static inline int atomic_sub_return(int i, atomic_t *v)
    {
    - return atomic_add_return(-i,v);
    + return atomic_add_return(-i, v);
    }

    #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
    @@ -227,7 +220,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
    * Atomically adds @a to @v, so long as @v was not already @u.
    * Returns non-zero if @v was not @u, and zero otherwise.
    */
    -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
    +static inline int atomic_add_unless(atomic_t *v, int a, int u)
    {
    int c, old;
    c = atomic_read(v);
    @@ -244,17 +237,17 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)

    #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

    -#define atomic_inc_return(v) (atomic_add_return(1,v))
    -#define atomic_dec_return(v) (atomic_sub_return(1,v))
    +#define atomic_inc_return(v) (atomic_add_return(1, v))
    +#define atomic_dec_return(v) (atomic_sub_return(1, v))

    /* These are x86-specific, used by some header files */
    -#define atomic_clear_mask(mask, addr) \
    -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
    -: : "r" (~(mask)),"m" (*addr) : "memory")
    +#define atomic_clear_mask(mask, addr) \
    + asm volatile(LOCK_PREFIX "andl %0,%1" \
    + : : "r" (~(mask)), "m" (*(addr)) : "memory")

    -#define atomic_set_mask(mask, addr) \
    -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
    -: : "r" (mask),"m" (*(addr)) : "memory")
    +#define atomic_set_mask(mask, addr) \
    + asm volatile(LOCK_PREFIX "orl %0,%1" \
    + : : "r" (mask), "m" (*(addr)) : "memory")

    /* Atomic operations are already serializing on x86 */
    #define smp_mb__before_atomic_dec() barrier()
    --
    1.5.4.rc2

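    atomic_add_unless() in this patch is the classic compare-and-swap retry loop: read the counter, bail out if it already equals u, otherwise try to install c + a and retry on contention. A portable C11 sketch of the same logic, using stdatomic rather than the LOCK_PREFIX assembly the kernel uses here:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Add 'a' to *v unless *v == u; return true if the add happened. */
    static bool add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u) {
                    /* On failure, c is reloaded with the current value. */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            return true;
            }
            return false;
    }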

  11. [PATCH 106/148] include/asm-x86/rwsem.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/rwsem.h | 169 ++++++++++++++++++++++++-----------------------
    1 files changed, 86 insertions(+), 83 deletions(-)

    diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
    index 520a379..750f2a3 100644
    --- a/include/asm-x86/rwsem.h
    +++ b/include/asm-x86/rwsem.h
    @@ -56,14 +56,16 @@ extern asmregparm struct rw_semaphore *
    /*
    * the semaphore definition
    */
    -struct rw_semaphore {
    - signed long count;
    +
    #define RWSEM_UNLOCKED_VALUE 0x00000000
    #define RWSEM_ACTIVE_BIAS 0x00000001
    #define RWSEM_ACTIVE_MASK 0x0000ffff
    #define RWSEM_WAITING_BIAS (-0x00010000)
    #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
    +
    +struct rw_semaphore {
    + signed long count;
    spinlock_t wait_lock;
    struct list_head wait_list;
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    @@ -78,11 +80,13 @@ struct rw_semaphore {
    #endif


    -#define __RWSEM_INITIALIZER(name) \
    -{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
    - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
    +#define __RWSEM_INITIALIZER(name) \
    +{ \
    + RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
    + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) \
    +}

    -#define DECLARE_RWSEM(name) \
    +#define DECLARE_RWSEM(name) \
    struct rw_semaphore name = __RWSEM_INITIALIZER(name)

    extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
    @@ -100,16 +104,16 @@ do { \
    */
    static inline void __down_read(struct rw_semaphore *sem)
    {
    - __asm__ __volatile__(
    - "# beginning down_read\n\t"
    -LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
    - " jns 1f\n"
    - " call call_rwsem_down_read_failed\n"
    - "1:\n\t"
    - "# ending down_read\n\t"
    - : "+m" (sem->count)
    - : "a" (sem)
    - : "memory", "cc");
    + asm volatile("# beginning down_read\n\t"
    + LOCK_PREFIX " incl (%%eax)\n\t"
    + /* adds 0x00000001, returns the old value */
    + " jns 1f\n"
    + " call call_rwsem_down_read_failed\n"
    + "1:\n\t"
    + "# ending down_read\n\t"
    + : "+m" (sem->count)
    + : "a" (sem)
    + : "memory", "cc");
    }

    /*
    @@ -118,21 +122,20 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
    static inline int __down_read_trylock(struct rw_semaphore *sem)
    {
    __s32 result, tmp;
    - __asm__ __volatile__(
    - "# beginning __down_read_trylock\n\t"
    - " movl %0,%1\n\t"
    - "1:\n\t"
    - " movl %1,%2\n\t"
    - " addl %3,%2\n\t"
    - " jle 2f\n\t"
    -LOCK_PREFIX " cmpxchgl %2,%0\n\t"
    - " jnz 1b\n\t"
    - "2:\n\t"
    - "# ending __down_read_trylock\n\t"
    - : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
    - : "i" (RWSEM_ACTIVE_READ_BIAS)
    - : "memory", "cc");
    - return result>=0 ? 1 : 0;
    + asm volatile("# beginning __down_read_trylock\n\t"
    + " movl %0,%1\n\t"
    + "1:\n\t"
    + " movl %1,%2\n\t"
    + " addl %3,%2\n\t"
    + " jle 2f\n\t"
    + LOCK_PREFIX " cmpxchgl %2,%0\n\t"
    + " jnz 1b\n\t"
    + "2:\n\t"
    + "# ending __down_read_trylock\n\t"
    + : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
    + : "i" (RWSEM_ACTIVE_READ_BIAS)
    + : "memory", "cc");
    + return result >= 0 ? 1 : 0;
    }

    /*
    @@ -143,17 +146,18 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
    int tmp;

    tmp = RWSEM_ACTIVE_WRITE_BIAS;
    - __asm__ __volatile__(
    - "# beginning down_write\n\t"
    -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
    - " testl %%edx,%%edx\n\t" /* was the count 0 before? */
    - " jz 1f\n"
    - " call call_rwsem_down_write_failed\n"
    - "1:\n"
    - "# ending down_write"
    - : "+m" (sem->count), "=d" (tmp)
    - : "a" (sem), "1" (tmp)
    - : "memory", "cc");
    + asm volatile("# beginning down_write\n\t"
    + LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
    + /* subtract 0x0000ffff, returns the old value */
    + " testl %%edx,%%edx\n\t"
    + /* was the count 0 before? */
    + " jz 1f\n"
    + " call call_rwsem_down_write_failed\n"
    + "1:\n"
    + "# ending down_write"
    + : "+m" (sem->count), "=d" (tmp)
    + : "a" (sem), "1" (tmp)
    + : "memory", "cc");
    }

    static inline void __down_write(struct rw_semaphore *sem)
    @@ -167,7 +171,7 @@ static inline void __down_write(struct rw_semaphore *sem)
    static inline int __down_write_trylock(struct rw_semaphore *sem)
    {
    signed long ret = cmpxchg(&sem->count,
    - RWSEM_UNLOCKED_VALUE,
    + RWSEM_UNLOCKED_VALUE,
    RWSEM_ACTIVE_WRITE_BIAS);
    if (ret == RWSEM_UNLOCKED_VALUE)
    return 1;
    @@ -180,16 +184,16 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
    static inline void __up_read(struct rw_semaphore *sem)
    {
    __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
    - __asm__ __volatile__(
    - "# beginning __up_read\n\t"
    -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
    - " jns 1f\n\t"
    - " call call_rwsem_wake\n"
    - "1:\n"
    - "# ending __up_read\n"
    - : "+m" (sem->count), "=d" (tmp)
    - : "a" (sem), "1" (tmp)
    - : "memory", "cc");
    + asm volatile("# beginning __up_read\n\t"
    + LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
    + /* subtracts 1, returns the old value */
    + " jns 1f\n\t"
    + " call call_rwsem_wake\n"
    + "1:\n"
    + "# ending __up_read\n"
    + : "+m" (sem->count), "=d" (tmp)
    + : "a" (sem), "1" (tmp)
    + : "memory", "cc");
    }

    /*
    @@ -197,17 +201,18 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu
    */
    static inline void __up_write(struct rw_semaphore *sem)
    {
    - __asm__ __volatile__(
    - "# beginning __up_write\n\t"
    - " movl %2,%%edx\n\t"
    -LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
    - " jz 1f\n"
    - " call call_rwsem_wake\n"
    - "1:\n\t"
    - "# ending __up_write\n"
    - : "+m" (sem->count)
    - : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
    - : "memory", "cc", "edx");
    + asm volatile("# beginning __up_write\n\t"
    + " movl %2,%%edx\n\t"
    + LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
    + /* tries to transition
    + 0xffff0001 -> 0x00000000 */
    + " jz 1f\n"
    + " call call_rwsem_wake\n"
    + "1:\n\t"
    + "# ending __up_write\n"
    + : "+m" (sem->count)
    + : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
    + : "memory", "cc", "edx");
    }

    /*
    @@ -215,16 +220,16 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
    */
    static inline void __downgrade_write(struct rw_semaphore *sem)
    {
    - __asm__ __volatile__(
    - "# beginning __downgrade_write\n\t"
    -LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
    - " jns 1f\n\t"
    - " call call_rwsem_downgrade_wake\n"
    - "1:\n\t"
    - "# ending __downgrade_write\n"
    - : "+m" (sem->count)
    - : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
    - : "memory", "cc");
    + asm volatile("# beginning __downgrade_write\n\t"
    + LOCK_PREFIX " addl %2,(%%eax)\n\t"
    + /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
    + " jns 1f\n\t"
    + " call call_rwsem_downgrade_wake\n"
    + "1:\n\t"
    + "# ending __downgrade_write\n"
    + : "+m" (sem->count)
    + : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
    + : "memory", "cc");
    }

    /*
    @@ -232,10 +237,9 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001
    */
    static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
    {
    - __asm__ __volatile__(
    -LOCK_PREFIX "addl %1,%0"
    - : "+m" (sem->count)
    - : "ir" (delta));
    + asm volatile(LOCK_PREFIX "addl %1,%0"
    + : "+m" (sem->count)
    + : "ir" (delta));
    }

    /*
    @@ -245,12 +249,11 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
    {
    int tmp = delta;

    - __asm__ __volatile__(
    -LOCK_PREFIX "xadd %0,%1"
    - : "+r" (tmp), "+m" (sem->count)
    - : : "memory");
    + asm volatile(LOCK_PREFIX "xadd %0,%1"
    + : "+r" (tmp), "+m" (sem->count)
    + : : "memory");

    - return tmp+delta;
    + return tmp + delta;
    }

    static inline int rwsem_is_locked(struct rw_semaphore *sem)
    --
    1.5.4.rc2

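    The bias constants this patch moves above the struct encode both counts in one word: the low 16 bits count active lockers (RWSEM_ACTIVE_MASK), and a writer also adds the negative waiting bias, which produces the 0xffff0001 value the up_write asm comments refer to. A sketch of the arithmetic using the same values (illustrative only, not the semaphore implementation):

    #include <stdio.h>

    /* Same values as the RWSEM_* constants in this hunk. */
    #define EX_UNLOCKED     0x00000000
    #define EX_ACTIVE_BIAS  0x00000001
    #define EX_WAITING_BIAS (-0x00010000)
    #define EX_WRITE_BIAS   (EX_WAITING_BIAS + EX_ACTIVE_BIAS)

    int main(void)
    {
            int count = EX_UNLOCKED;

            count += EX_ACTIVE_BIAS;        /* one reader  */
            count += EX_ACTIVE_BIAS;        /* two readers */
            printf("two readers: %#x\n", (unsigned int)count);

            count = EX_UNLOCKED + EX_WRITE_BIAS;
            printf("one writer:  %#x\n", (unsigned int)count);  /* 0xffff0001 */
            return 0;
    }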

  12. [PATCH 119/148] include/asm-x86/string_64.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/string_64.h | 66 +++++++++++++++++++++---------------------
    1 files changed, 33 insertions(+), 33 deletions(-)

    diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
    index e583da7..52b5ab3 100644
    --- a/include/asm-x86/string_64.h
    +++ b/include/asm-x86/string_64.h
    @@ -3,26 +3,24 @@

    #ifdef __KERNEL__

    -/* Written 2002 by Andi Kleen */
    +/* Written 2002 by Andi Kleen */

    -/* Only used for special circumstances. Stolen from i386/string.h */
    -static __always_inline void *
    -__inline_memcpy(void * to, const void * from, size_t n)
    +/* Only used for special circumstances. Stolen from i386/string.h */
    +static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
    {
    -unsigned long d0, d1, d2;
    -__asm__ __volatile__(
    - "rep ; movsl\n\t"
    - "testb $2,%b4\n\t"
    - "je 1f\n\t"
    - "movsw\n"
    - "1:\ttestb $1,%b4\n\t"
    - "je 2f\n\t"
    - "movsb\n"
    - "2:"
    - : "=&c" (d0), "=&D" (d1), "=&S" (d2)
    - :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
    - : "memory");
    -return (to);
    + unsigned long d0, d1, d2;
    + asm volatile("rep ; movsl\n\t"
    + "testb $2,%b4\n\t"
    + "je 1f\n\t"
    + "movsw\n"
    + "1:\ttestb $1,%b4\n\t"
    + "je 2f\n\t"
    + "movsb\n"
    + "2:"
    + : "=&c" (d0), "=&D" (d1), "=&S" (d2)
    + : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
    + : "memory");
    + return to;
    }

    /* Even with __builtin_ the compiler may decide to use the out of line
    @@ -32,28 +30,30 @@ return (to);
    #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
    extern void *memcpy(void *to, const void *from, size_t len);
    #else
    -extern void *__memcpy(void *to, const void *from, size_t len);
    -#define memcpy(dst,src,len) \
    - ({ size_t __len = (len); \
    - void *__ret; \
    - if (__builtin_constant_p(len) && __len >= 64) \
    - __ret = __memcpy((dst),(src),__len); \
    - else \
    - __ret = __builtin_memcpy((dst),(src),__len); \
    - __ret; })
    +extern void *__memcpy(void *to, const void *from, size_t len);
    +#define memcpy(dst, src, len) \
    +({ \
    + size_t __len = (len); \
    + void *__ret; \
    + if (__builtin_constant_p(len) && __len >= 64) \
    + __ret = __memcpy((dst), (src), __len); \
    + else \
    + __ret = __builtin_memcpy((dst), (src), __len); \
    + __ret; \
    +})
    #endif

    #define __HAVE_ARCH_MEMSET
    void *memset(void *s, int c, size_t n);

    #define __HAVE_ARCH_MEMMOVE
    -void * memmove(void * dest,const void *src,size_t count);
    +void *memmove(void *dest, const void *src, size_t count);

    -int memcmp(const void * cs,const void * ct,size_t count);
    -size_t strlen(const char * s);
    -char *strcpy(char * dest,const char *src);
    -char *strcat(char * dest, const char * src);
    -int strcmp(const char * cs,const char * ct);
    +int memcmp(const void *cs, const void *ct, size_t count);
    +size_t strlen(const char *s);
    +char *strcpy(char *dest, const char *src);
    +char *strcat(char *dest, const char *src);
    +int strcmp(const char *cs, const char *ct);

    #endif /* __KERNEL__ */

    --
    1.5.4.rc2

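    The reflowed memcpy() macro relies on a GNU C "statement expression": the ({ ... }) block is evaluated and its final expression becomes the value of the whole macro, which is what lets it declare __len and __ret locally yet still be usable wherever an expression is expected. A generic sketch of the construct (gcc/clang extension, hypothetical names):

    #define square_of(x)                    \
    ({                                      \
            long __v = (x);                 \
            __v * __v;  /* value of the whole ({ ... }) expression */ \
    })

    static long demo(long n)
    {
            return square_of(n + 1);        /* usable in any expression context */
    }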

  13. [PATCH 120/148] include/asm-x86/suspend_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/suspend_32.h | 12 ++++++------
    1 files changed, 6 insertions(+), 6 deletions(-)

    diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
    index 1bbda3a..24e1c08 100644
    --- a/include/asm-x86/suspend_32.h
    +++ b/include/asm-x86/suspend_32.h
    @@ -10,7 +10,7 @@ static inline int arch_prepare_suspend(void) { return 0; }

    /* image of the saved processor state */
    struct saved_context {
    - u16 es, fs, gs, ss;
    + u16 es, fs, gs, ss;
    unsigned long cr0, cr2, cr3, cr4;
    struct desc_ptr gdt;
    struct desc_ptr idt;
    @@ -32,11 +32,11 @@ extern unsigned long saved_edi;
    static inline void acpi_save_register_state(unsigned long return_point)
    {
    saved_eip = return_point;
    - asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
    - asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
    - asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
    - asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
    - asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
    + asm volatile("movl %%esp,%0" : "=m" (saved_esp));
    + asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
    + asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
    + asm volatile("movl %%edi,%0" : "=m" (saved_edi));
    + asm volatile("movl %%esi,%0" : "=m" (saved_esi));
    }

    #define acpi_restore_register_state() do {} while (0)
    --
    1.5.4.rc2


  14. [PATCH 026/148] include/asm-x86/dma-mapping_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/dma-mapping_32.h | 130 ++++++++++++++++++-------------------
    1 files changed, 63 insertions(+), 67 deletions(-)

    diff --git a/include/asm-x86/dma-mapping_32.h b/include/asm-x86/dma-mapping_32.h
    index 55f01bd..4f6c2a4 100644
    --- a/include/asm-x86/dma-mapping_32.h
    +++ b/include/asm-x86/dma-mapping_32.h
    @@ -12,14 +12,14 @@
    #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

    void *dma_alloc_coherent(struct device *dev, size_t size,
    - dma_addr_t *dma_handle, gfp_t flag);
    + dma_addr_t *dma_handle, gfp_t flag);

    void dma_free_coherent(struct device *dev, size_t size,
    - void *vaddr, dma_addr_t dma_handle);
    + void *vaddr, dma_addr_t dma_handle);

    -static inline dma_addr_t
    -dma_map_single(struct device *dev, void *ptr, size_t size,
    - enum dma_data_direction direction)
    +static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
    + size_t size,
    + enum dma_data_direction direction)
    {
    BUG_ON(!valid_dma_direction(direction));
    WARN_ON(size == 0);
    @@ -27,16 +27,15 @@ dma_map_single(struct device *dev, void *ptr, size_t size,
    return virt_to_phys(ptr);
    }

    -static inline void
    -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
    + size_t size,
    + enum dma_data_direction direction)
    {
    BUG_ON(!valid_dma_direction(direction));
    }

    -static inline int
    -dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
    - enum dma_data_direction direction)
    +static inline int dma_map_sg(struct device *dev, struct scatterlist *sglist,
    + int nents, enum dma_data_direction direction)
    {
    struct scatterlist *sg;
    int i;
    @@ -54,88 +53,89 @@ dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
    return nents;
    }

    -static inline dma_addr_t
    -dma_map_page(struct device *dev, struct page *page, unsigned long offset,
    - size_t size, enum dma_data_direction direction)
    +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
    + unsigned long offset, size_t size,
    + enum dma_data_direction direction)
    {
    BUG_ON(!valid_dma_direction(direction));
    return page_to_phys(page) + offset;
    }

    -static inline void
    -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
    + size_t size,
    + enum dma_data_direction direction)
    {
    BUG_ON(!valid_dma_direction(direction));
    }


    -static inline void
    -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
    - enum dma_data_direction direction)
    +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
    + int nhwentries,
    + enum dma_data_direction direction)
    {
    BUG_ON(!valid_dma_direction(direction));
    }

    -static inline void
    -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_sync_single_for_cpu(struct device *dev,
    + dma_addr_t dma_handle, size_t size,
    + enum dma_data_direction direction)
    {
    }

    -static inline void
    -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_sync_single_for_device(struct device *dev,
    + dma_addr_t dma_handle,
    + size_t size,
    + enum dma_data_direction direction)
    {
    flush_write_buffers();
    }

    -static inline void
    -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
    - unsigned long offset, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_sync_single_range_for_cpu(struct device *dev,
    + dma_addr_t dma_handle,
    + unsigned long offset,
    + size_t size,
    + enum dma_data_direction direction)
    {
    }

    -static inline void
    -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
    - unsigned long offset, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_sync_single_range_for_device(struct device *dev,
    + dma_addr_t dma_handle,
    + unsigned long offset,
    + size_t size,
    + enum dma_data_direction direction)
    {
    flush_write_buffers();
    }

    -static inline void
    -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
    - enum dma_data_direction direction)
    +static inline void dma_sync_sg_for_cpu(struct device *dev,
    + struct scatterlist *sg, int nelems,
    + enum dma_data_direction direction)
    {
    }

    -static inline void
    -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
    - enum dma_data_direction direction)
    +static inline void dma_sync_sg_for_device(struct device *dev,
    + struct scatterlist *sg, int nelems,
    + enum dma_data_direction direction)
    {
    flush_write_buffers();
    }

    -static inline int
    -dma_mapping_error(dma_addr_t dma_addr)
    +static inline int dma_mapping_error(dma_addr_t dma_addr)
    {
    return 0;
    }

    extern int forbid_dac;

    -static inline int
    -dma_supported(struct device *dev, u64 mask)
    +static inline int dma_supported(struct device *dev, u64 mask)
    {
    - /*
    - * we fall back to GFP_DMA when the mask isn't all 1s,
    - * so we can't guarantee allocations that must be
    - * within a tighter range than GFP_DMA..
    - */
    - if(mask < 0x00ffffff)
    - return 0;
    + /*
    + * we fall back to GFP_DMA when the mask isn't all 1s,
    + * so we can't guarantee allocations that must be
    + * within a tighter range than GFP_DMA..
    + */
    + if (mask < 0x00ffffff)
    + return 0;

    /* Work around chipset bugs */
    if (forbid_dac > 0 && mask > 0xffffffffULL)
    @@ -144,10 +144,9 @@ dma_supported(struct device *dev, u64 mask)
    return 1;
    }

    -static inline int
    -dma_set_mask(struct device *dev, u64 mask)
    +static inline int dma_set_mask(struct device *dev, u64 mask)
    {
    - if(!dev->dma_mask || !dma_supported(dev, mask))
    + if (!dev->dma_mask || !dma_supported(dev, mask))
    return -EIO;

    *dev->dma_mask = mask;
    @@ -155,8 +154,7 @@ dma_set_mask(struct device *dev, u64 mask)
    return 0;
    }

    -static inline int
    -dma_get_cache_alignment(void)
    +static inline int dma_get_cache_alignment(void)
    {
    /* no easy way to get cache size on all x86, so return the
    * maximum possible, to be safe */
    @@ -165,23 +163,21 @@ dma_get_cache_alignment(void)

    #define dma_is_consistent(d, h) (1)

    -static inline void
    -dma_cache_sync(struct device *dev, void *vaddr, size_t size,
    - enum dma_data_direction direction)
    +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
    + enum dma_data_direction direction)
    {
    flush_write_buffers();
    }

    #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
    -extern int
    -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
    - dma_addr_t device_addr, size_t size, int flags);
    +extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
    + dma_addr_t device_addr, size_t size,
    + int flags);

    -extern void
    -dma_release_declared_memory(struct device *dev);
    +extern void dma_release_declared_memory(struct device *dev);

    -extern void *
    -dma_mark_declared_memory_occupied(struct device *dev,
    - dma_addr_t device_addr, size_t size);
    +extern void *dma_mark_declared_memory_occupied(struct device *dev,
    + dma_addr_t device_addr,
    + size_t size);

    #endif
    --
    1.5.4.rc2
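
    As a usage note, a hypothetical driver fragment (not part of the patch)
    calling the streaming-DMA helpers whose declarations are rewrapped above,
    in the usual map/program/unmap order:

    #include <linux/dma-mapping.h>

    static int example_start_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* map the CPU buffer for a CPU -> device transfer */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(handle))  /* one-argument form, as above */
                    return -EIO;

            /* ... hand 'handle' to the hardware and wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }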


  15. [PATCH 116/148] include/asm-x86/spinlock.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/spinlock.h | 105 +++++++++++++++++++++-----------------------
    1 files changed, 50 insertions(+), 55 deletions(-)

    diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
    index 23804c1..47dfe26 100644
    --- a/include/asm-x86/spinlock.h
    +++ b/include/asm-x86/spinlock.h
    @@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
    {
    short inc = 0x0100;

    - __asm__ __volatile__ (
    + asm volatile (
    LOCK_PREFIX "xaddw %w0, %1\n"
    "1:\t"
    "cmpb %h0, %b0\n\t"
    @@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
    /* don't need lfence here, because loads are in-order */
    "jmp 1b\n"
    "2:"
    - :"+Q" (inc), "+m" (lock->slock)
    + : "+Q" (inc), "+m" (lock->slock)
    :
    - :"memory", "cc");
    + : "memory", "cc");
    }

    #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
    @@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
    int tmp;
    short new;

    - asm volatile(
    - "movw %2,%w0\n\t"
    - "cmpb %h0,%b0\n\t"
    - "jne 1f\n\t"
    - "movw %w0,%w1\n\t"
    - "incb %h1\n\t"
    - "lock ; cmpxchgw %w1,%2\n\t"
    - "1:"
    - "sete %b1\n\t"
    - "movzbl %b1,%0\n\t"
    - :"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
    - :
    - : "memory", "cc");
    + asm volatile("movw %2,%w0\n\t"
    + "cmpb %h0,%b0\n\t"
    + "jne 1f\n\t"
    + "movw %w0,%w1\n\t"
    + "incb %h1\n\t"
    + "lock ; cmpxchgw %w1,%2\n\t"
    + "1:"
    + "sete %b1\n\t"
    + "movzbl %b1,%0\n\t"
    + : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
    + :
    + : "memory", "cc");

    return tmp;
    }

    static inline void __raw_spin_unlock(raw_spinlock_t *lock)
    {
    - __asm__ __volatile__(
    - UNLOCK_LOCK_PREFIX "incb %0"
    - :"+m" (lock->slock)
    - :
    - :"memory", "cc");
    + asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
    + : "+m" (lock->slock)
    + :
    + : "memory", "cc");
    }
    #else
    static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
    @@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
    int inc = 0x00010000;
    int tmp;

    - __asm__ __volatile__ (
    - "lock ; xaddl %0, %1\n"
    - "movzwl %w0, %2\n\t"
    - "shrl $16, %0\n\t"
    - "1:\t"
    - "cmpl %0, %2\n\t"
    - "je 2f\n\t"
    - "rep ; nop\n\t"
    - "movzwl %1, %2\n\t"
    - /* don't need lfence here, because loads are in-order */
    - "jmp 1b\n"
    - "2:"
    - :"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
    - :
    - :"memory", "cc");
    + asm volatile("lock ; xaddl %0, %1\n"
    + "movzwl %w0, %2\n\t"
    + "shrl $16, %0\n\t"
    + "1:\t"
    + "cmpl %0, %2\n\t"
    + "je 2f\n\t"
    + "rep ; nop\n\t"
    + "movzwl %1, %2\n\t"
    + /* don't need lfence here, because loads are in-order */
    + "jmp 1b\n"
    + "2:"
    + : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
    + :
    + : "memory", "cc");
    }

    #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
    @@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
    int tmp;
    int new;

    - asm volatile(
    - "movl %2,%0\n\t"
    - "movl %0,%1\n\t"
    - "roll $16, %0\n\t"
    - "cmpl %0,%1\n\t"
    - "jne 1f\n\t"
    - "addl $0x00010000, %1\n\t"
    - "lock ; cmpxchgl %1,%2\n\t"
    - "1:"
    - "sete %b1\n\t"
    - "movzbl %b1,%0\n\t"
    - :"=&a" (tmp), "=r" (new), "+m" (lock->slock)
    - :
    - : "memory", "cc");
    + asm volatile("movl %2,%0\n\t"
    + "movl %0,%1\n\t"
    + "roll $16, %0\n\t"
    + "cmpl %0,%1\n\t"
    + "jne 1f\n\t"
    + "addl $0x00010000, %1\n\t"
    + "lock ; cmpxchgl %1,%2\n\t"
    + "1:"
    + "sete %b1\n\t"
    + "movzbl %b1,%0\n\t"
    + : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
    + :
    + : "memory", "cc");

    return tmp;
    }

    static inline void __raw_spin_unlock(raw_spinlock_t *lock)
    {
    - __asm__ __volatile__(
    - UNLOCK_LOCK_PREFIX "incw %0"
    - :"+m" (lock->slock)
    - :
    - :"memory", "cc");
    + asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
    + : "+m" (lock->slock)
    + :
    + : "memory", "cc");
    }
    #endif

    --
    1.5.4.rc2
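
    The asm above is easier to follow next to a plain-C model of the same
    byte-ticket lock (a sketch only: little-endian x86 assumed, and GCC
    __sync builtins stand in for the hand-written xadd/incb):

    struct ticket_lock { unsigned short slock; }; /* low byte: now serving,
                                                     high byte: next ticket */

    static void ticket_lock_acquire(struct ticket_lock *lock)
    {
            /* take a ticket: fetch the old word, bump the high byte */
            unsigned short old = __sync_fetch_and_add(&lock->slock, 0x0100);
            unsigned char ticket = old >> 8;

            /* spin until the "now serving" byte reaches our ticket */
            while (*(volatile unsigned char *)&lock->slock != ticket)
                    ;
    }

    static void ticket_lock_release(struct ticket_lock *lock)
    {
            /* advance "now serving"; mirrors the "incb %0" unlock above */
            __sync_fetch_and_add((volatile unsigned char *)&lock->slock, 1);
    }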


  16. [PATCH 107/148] include/asm-x86/semaphore_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/semaphore_32.h | 104 +++++++++++++++++++---------------------
    1 files changed, 50 insertions(+), 54 deletions(-)

    diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h
    index ac96d38..42a7e39 100644
    --- a/include/asm-x86/semaphore_32.h
    +++ b/include/asm-x86/semaphore_32.h
    @@ -55,12 +55,12 @@ struct semaphore {
    .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
    }

    -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
    - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
    +#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
    + struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

    -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
    +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)

    -static inline void sema_init (struct semaphore *sem, int val)
    +static inline void sema_init(struct semaphore *sem, int val)
    {
    /*
    * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
    @@ -73,19 +73,19 @@ static inline void sema_init (struct semaphore *sem, int val)
    init_waitqueue_head(&sem->wait);
    }

    -static inline void init_MUTEX (struct semaphore *sem)
    +static inline void init_MUTEX(struct semaphore *sem)
    {
    sema_init(sem, 1);
    }

    -static inline void init_MUTEX_LOCKED (struct semaphore *sem)
    +static inline void init_MUTEX_LOCKED(struct semaphore *sem)
    {
    sema_init(sem, 0);
    }

    extern asmregparm void __down_failed(atomic_t *count_ptr);
    -extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
    -extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
    +extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
    +extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
    extern asmregparm void __up_wakeup(atomic_t *count_ptr);

    /*
    @@ -93,41 +93,39 @@ extern asmregparm void __up_wakeup(atomic_t *count_ptr);
    * "__down_failed" is a special asm handler that calls the C
    * routine that actually waits. See arch/i386/kernel/semaphore.c
    */
    -static inline void down(struct semaphore * sem)
    +static inline void down(struct semaphore *sem)
    {
    might_sleep();
    - __asm__ __volatile__(
    - "# atomic down operation\n\t"
    - LOCK_PREFIX "decl %0\n\t" /* --sem->count */
    - "jns 2f\n"
    - "\tlea %0,%%eax\n\t"
    - "call __down_failed\n"
    - "2:"
    - :"+m" (sem->count)
    - :
    - :"memory","ax");
    + asm volatile("# atomic down operation\n\t"
    + LOCK_PREFIX "decl %0\n\t" /* --sem->count */
    + "jns 2f\n"
    + "\tlea %0,%%eax\n\t"
    + "call __down_failed\n"
    + "2:"
    + : "+m" (sem->count)
    + :
    + : "memory", "ax");
    }

    /*
    * Interruptible try to acquire a semaphore. If we obtained
    * it, return zero. If we were interrupted, returns -EINTR
    */
    -static inline int down_interruptible(struct semaphore * sem)
    +static inline int down_interruptible(struct semaphore *sem)
    {
    int result;

    might_sleep();
    - __asm__ __volatile__(
    - "# atomic interruptible down operation\n\t"
    - "xorl %0,%0\n\t"
    - LOCK_PREFIX "decl %1\n\t" /* --sem->count */
    - "jns 2f\n\t"
    - "lea %1,%%eax\n\t"
    - "call __down_failed_interruptible\n"
    - "2:"
    - :"=&a" (result), "+m" (sem->count)
    - :
    - :"memory");
    + asm volatile("# atomic interruptible down operation\n\t"
    + "xorl %0,%0\n\t"
    + LOCK_PREFIX "decl %1\n\t" /* --sem->count */
    + "jns 2f\n\t"
    + "lea %1,%%eax\n\t"
    + "call __down_failed_interruptible\n"
    + "2:"
    + : "=&a" (result), "+m" (sem->count)
    + :
    + : "memory");
    return result;
    }

    @@ -135,21 +133,20 @@ static inline int down_interruptible(struct semaphore * sem)
    * Non-blockingly attempt to down() a semaphore.
    * Returns zero if we acquired it
    */
    -static inline int down_trylock(struct semaphore * sem)
    +static inline int down_trylock(struct semaphore *sem)
    {
    int result;

    - __asm__ __volatile__(
    - "# atomic interruptible down operation\n\t"
    - "xorl %0,%0\n\t"
    - LOCK_PREFIX "decl %1\n\t" /* --sem->count */
    - "jns 2f\n\t"
    - "lea %1,%%eax\n\t"
    - "call __down_failed_trylock\n\t"
    - "2:\n"
    - :"=&a" (result), "+m" (sem->count)
    - :
    - :"memory");
    + asm volatile("# atomic interruptible down operation\n\t"
    + "xorl %0,%0\n\t"
    + LOCK_PREFIX "decl %1\n\t" /* --sem->count */
    + "jns 2f\n\t"
    + "lea %1,%%eax\n\t"
    + "call __down_failed_trylock\n\t"
    + "2:\n"
    + : "=&a" (result), "+m" (sem->count)
    + :
    + : "memory");
    return result;
    }

    @@ -157,18 +154,17 @@ static inline int down_trylock(struct semaphore * sem)
    * Note! This is subtle. We jump to wake people up only if
    * the semaphore was negative (== somebody was waiting on it).
    */
    -static inline void up(struct semaphore * sem)
    +static inline void up(struct semaphore *sem)
    {
    - __asm__ __volatile__(
    - "# atomic up operation\n\t"
    - LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
    - "jg 1f\n\t"
    - "lea %0,%%eax\n\t"
    - "call __up_wakeup\n"
    - "1:"
    - :"+m" (sem->count)
    - :
    - :"memory","ax");
    + asm volatile("# atomic up operation\n\t"
    + LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
    + "jg 1f\n\t"
    + "lea %0,%%eax\n\t"
    + "call __up_wakeup\n"
    + "1:"
    + : "+m" (sem->count)
    + :
    + : "memory", "ax");
    }

    #endif
    --
    1.5.4.rc2
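
    A hypothetical caller (not in the patch) of the primitives whose asm is
    reformatted above; DECLARE_MUTEX() comes from this same header:

    static DECLARE_MUTEX(example_sem);              /* count starts at 1 */

    static int example_update(void)
    {
            if (down_interruptible(&example_sem))
                    return -EINTR;  /* a signal arrived while sleeping */

            /* ... modify the data guarded by example_sem ... */

            up(&example_sem);
            return 0;
    }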


  17. [PATCH 025/148] include/asm-x86/dma.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/dma.h | 45 ++++++++++++++++++++++-----------------------
    1 files changed, 22 insertions(+), 23 deletions(-)

    diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
    index e9733ce..ca1098a 100644
    --- a/include/asm-x86/dma.h
    +++ b/include/asm-x86/dma.h
    @@ -12,7 +12,6 @@
    #include <asm/io.h> /* need byte IO */
    #include <linux/delay.h>

    -
    #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
    #define dma_outb outb_p
    #else
    @@ -74,15 +73,15 @@
    #ifdef CONFIG_X86_32

    /* The maximum address that we can perform a DMA transfer to on this platform */
    -#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
    +#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)

    #else

    /* 16MB ISA DMA zone */
    -#define MAX_DMA_PFN ((16*1024*1024) >> PAGE_SHIFT)
    +#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)

    /* 4GB broken PCI/AGP hardware bus master zone */
    -#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
    +#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

    /* Compat define for old dma zone */
    #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
    @@ -154,20 +153,20 @@

    extern spinlock_t dma_spin_lock;

    -static __inline__ unsigned long claim_dma_lock(void)
    +static inline unsigned long claim_dma_lock(void)
    {
    unsigned long flags;
    spin_lock_irqsave(&dma_spin_lock, flags);
    return flags;
    }

    -static __inline__ void release_dma_lock(unsigned long flags)
    +static inline void release_dma_lock(unsigned long flags)
    {
    spin_unlock_irqrestore(&dma_spin_lock, flags);
    }

    /* enable/disable a specific DMA channel */
    -static __inline__ void enable_dma(unsigned int dmanr)
    +static inline void enable_dma(unsigned int dmanr)
    {
    if (dmanr <= 3)
    dma_outb(dmanr, DMA1_MASK_REG);
    @@ -175,7 +174,7 @@ static __inline__ void enable_dma(unsigned int dmanr)
    dma_outb(dmanr & 3, DMA2_MASK_REG);
    }

    -static __inline__ void disable_dma(unsigned int dmanr)
    +static inline void disable_dma(unsigned int dmanr)
    {
    if (dmanr <= 3)
    dma_outb(dmanr | 4, DMA1_MASK_REG);
    @@ -190,7 +189,7 @@ static __inline__ void disable_dma(unsigned int dmanr)
    * --- In order to do that, the DMA routines below should ---
    * --- only be used while holding the DMA lock ! ---
    */
    -static __inline__ void clear_dma_ff(unsigned int dmanr)
    +static inline void clear_dma_ff(unsigned int dmanr)
    {
    if (dmanr <= 3)
    dma_outb(0, DMA1_CLEAR_FF_REG);
    @@ -199,7 +198,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr)
    }

    /* set mode (above) for a specific DMA channel */
    -static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
    +static inline void set_dma_mode(unsigned int dmanr, char mode)
    {
    if (dmanr <= 3)
    dma_outb(mode | dmanr, DMA1_MODE_REG);
    @@ -212,7 +211,7 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
    * the lower 16 bits of the DMA current address register, but a 64k boundary
    * may have been crossed.
    */
    -static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
    +static inline void set_dma_page(unsigned int dmanr, char pagenr)
    {
    switch (dmanr) {
    case 0:
    @@ -243,15 +242,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
    /* Set transfer address & page bits for specific DMA channel.
    * Assumes dma flipflop is clear.
    */
    -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
    +static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
    {
    set_dma_page(dmanr, a>>16);
    if (dmanr <= 3) {
    dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
    dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
    } else {
    - dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
    - dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
    + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
    + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
    }
    }

    @@ -264,18 +263,18 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
    * Assumes dma flip-flop is clear.
    * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
    */
    -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
    +static inline void set_dma_count(unsigned int dmanr, unsigned int count)
    {
    count--;
    if (dmanr <= 3) {
    - dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
    - dma_outb((count >> 8) & 0xff,
    - ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
    + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
    + dma_outb((count >> 8) & 0xff,
    + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
    } else {
    - dma_outb((count >> 1) & 0xff,
    - ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
    - dma_outb((count >> 9) & 0xff,
    - ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
    + dma_outb((count >> 1) & 0xff,
    + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
    + dma_outb((count >> 9) & 0xff,
    + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
    }
    }

    @@ -288,7 +287,7 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
    *
    * Assumes DMA flip-flop is clear.
    */
    -static __inline__ int get_dma_residue(unsigned int dmanr)
    +static inline int get_dma_residue(unsigned int dmanr)
    {
    unsigned int io_port;
    /* using short to get 16-bit wrap around */
    --
    1.5.4.rc2
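
    For context, a hypothetical sequence (not part of the patch) showing how
    the helpers above are normally used together when programming a legacy
    ISA channel; DMA_MODE_READ is defined earlier in this same header:

    static void example_program_isa_dma(unsigned int chan,
                                        unsigned int phys, unsigned int len)
    {
            unsigned long flags = claim_dma_lock(); /* flip-flop state is global */

            disable_dma(chan);
            clear_dma_ff(chan);
            set_dma_mode(chan, DMA_MODE_READ);      /* device -> memory */
            set_dma_addr(chan, phys);               /* also writes the page register */
            set_dma_count(chan, len);
            enable_dma(chan);

            release_dma_lock(flags);
    }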


  18. [PATCH 144/148] include/asm-x86/vmi.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/vmi.h | 88 ++++++++++++++++++++++++------------------------
    1 files changed, 44 insertions(+), 44 deletions(-)

    diff --git a/include/asm-x86/vmi.h b/include/asm-x86/vmi.h
    index eb8bd89..b7c0dea 100644
    --- a/include/asm-x86/vmi.h
    +++ b/include/asm-x86/vmi.h
    @@ -155,9 +155,9 @@

    #ifndef __ASSEMBLY__
    struct vmi_relocation_info {
    - unsigned char *eip;
    - unsigned char type;
    - unsigned char reserved[3];
    + unsigned char *eip;
    + unsigned char type;
    + unsigned char reserved[3];
    };
    #endif

    @@ -173,53 +173,53 @@ struct vmi_relocation_info {
    #ifndef __ASSEMBLY__

    struct vrom_header {
    - u16 rom_signature; // option ROM signature
    - u8 rom_length; // ROM length in 512 byte chunks
    - u8 rom_entry[4]; // 16-bit code entry point
    - u8 rom_pad0; // 4-byte align pad
    - u32 vrom_signature; // VROM identification signature
    - u8 api_version_min;// Minor version of API
    - u8 api_version_maj;// Major version of API
    - u8 jump_slots; // Number of jump slots
    - u8 reserved1; // Reserved for expansion
    - u32 virtual_top; // Hypervisor virtual address start
    - u16 reserved2; // Reserved for expansion
    - u16 license_offs; // Offset to License string
    - u16 pci_header_offs;// Offset to PCI OPROM header
    - u16 pnp_header_offs;// Offset to PnP OPROM header
    - u32 rom_pad3; // PnP reserverd / VMI reserved
    - u8 reserved[96]; // Reserved for headers
    - char vmi_init[8]; // VMI_Init jump point
    - char get_reloc[8]; // VMI_GetRelocationInfo jump point
    + u16 rom_signature; /* option ROM signature */
    + u8 rom_length; /* ROM length in 512 byte chunks */
    + u8 rom_entry[4]; /* 16-bit code entry point */
    + u8 rom_pad0; /* 4-byte align pad */
    + u32 vrom_signature; /* VROM identification signature */
    + u8 api_version_min;/* Minor version of API */
    + u8 api_version_maj;/* Major version of API */
    + u8 jump_slots; /* Number of jump slots */
    + u8 reserved1; /* Reserved for expansion */
    + u32 virtual_top; /* Hypervisor virtual address start */
    + u16 reserved2; /* Reserved for expansion */
    + u16 license_offs; /* Offset to License string */
    + u16 pci_header_offs;/* Offset to PCI OPROM header */
    + u16 pnp_header_offs;/* Offset to PnP OPROM header */
    + u32 rom_pad3; /* PnP reserverd / VMI reserved */
    + u8 reserved[96]; /* Reserved for headers */
    + char vmi_init[8]; /* VMI_Init jump point */
    + char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
    } __attribute__((packed));

    struct pnp_header {
    - char sig[4];
    - char rev;
    - char size;
    - short next;
    - short res;
    - long devID;
    - unsigned short manufacturer_offset;
    - unsigned short product_offset;
    + char sig[4];
    + char rev;
    + char size;
    + short next;
    + short res;
    + long devID;
    + unsigned short manufacturer_offset;
    + unsigned short product_offset;
    } __attribute__((packed));

    struct pci_header {
    - char sig[4];
    - short vendorID;
    - short deviceID;
    - short vpdData;
    - short size;
    - char rev;
    - char class;
    - char subclass;
    - char interface;
    - short chunks;
    - char rom_version_min;
    - char rom_version_maj;
    - char codetype;
    - char lastRom;
    - short reserved;
    + char sig[4];
    + short vendorID;
    + short deviceID;
    + short vpdData;
    + short size;
    + char rev;
    + char class;
    + char subclass;
    + char interface;
    + short chunks;
    + char rom_version_min;
    + char rom_version_maj;
    + char codetype;
    + char lastRom;
    + short reserved;
    } __attribute__((packed));

    /* Function prototypes for bootstrapping */
    --
    1.5.4.rc2
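
    A small standalone illustration (an example of mine, not from the patch)
    of why these ROM/PnP/PCI header structs carry __attribute__((packed)):
    their layout must match the raw bytes of the option ROM, so compiler
    padding between members is forbidden.

    #include <stdio.h>

    struct with_padding { unsigned char tag; unsigned int value; };
    struct no_padding   { unsigned char tag; unsigned int value; } __attribute__((packed));

    int main(void)
    {
            /* typically prints "8 5" on x86: padded vs. packed layout */
            printf("%zu %zu\n", sizeof(struct with_padding),
                                sizeof(struct no_padding));
            return 0;
    }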


  19. [PATCH 114/148] include/asm-x86/smp_32.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/smp_32.h | 6 +++---
    1 files changed, 3 insertions(+), 3 deletions(-)

    diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
    index f861d04..cb3ada2 100644
    --- a/include/asm-x86/smp_32.h
    +++ b/include/asm-x86/smp_32.h
    @@ -18,8 +18,8 @@

    extern cpumask_t cpu_callin_map;

    -extern void (*mtrr_hook) (void);
    -extern void zap_low_mappings (void);
    +extern void (*mtrr_hook)(void);
    +extern void zap_low_mappings(void);

    #ifdef CONFIG_SMP
    /*
    @@ -44,7 +44,7 @@ static inline int num_booting_cpus(void)

    #ifdef CONFIG_X86_LOCAL_APIC

    -static __inline int logical_smp_processor_id(void)
    +static inline int logical_smp_processor_id(void)
    {
    /* we don't want to mark this access volatile - bad code generation */
    return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
    --
    1.5.4.rc2


  20. [PATCH 103/148] include/asm-x86/reboot.h: checkpatch cleanups - formatting only


    Signed-off-by: Joe Perches
    ---
    include/asm-x86/reboot.h | 3 +--
    1 files changed, 1 insertions(+), 2 deletions(-)

    diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
    index e9e3ffc..6b5233b 100644
    --- a/include/asm-x86/reboot.h
    +++ b/include/asm-x86/reboot.h
    @@ -3,8 +3,7 @@

    struct pt_regs;

    -struct machine_ops
    -{
    +struct machine_ops {
    void (*restart)(char *cmd);
    void (*halt)(void);
    void (*power_off)(void);
    --
    1.5.4.rc2

