[PATCH 0/28] integrate dma_ops

Thread: [PATCH 0/28] integrate dma_ops

  1. [PATCH 26/28] x86: return conditional to mmu

    Just return our allocation if we don't have an mmu. For i386, where this patch
    is being applied, we never do. So the goal is simply to make the code look like
    x86_64's.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 34 ++++++++++++++++++++++++++++++++--
    1 files changed, 32 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 5450bd1..f134de3 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -116,12 +116,42 @@ again:
    gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
    goto again;
    }
    +
    + /* Let low level make its own zone decisions */
    + gfp &= ~(GFP_DMA32|GFP_DMA);
    +
    + if (dma_ops->alloc_coherent)
    + return dma_ops->alloc_coherent(dev, size,
    + dma_handle, gfp);
    + return NULL;
    +
    }
    memset(ret, 0, size);
    - *dma_handle = bus;
    + if (!mmu) {
    + *dma_handle = bus;
    + return ret;
    + }
    + }
    +
    + if (dma_ops->alloc_coherent) {
    + free_pages((unsigned long)ret, get_order(size));
    + gfp &= ~(GFP_DMA|GFP_DMA32);
    + return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
    + }
    +
    + if (dma_ops->map_simple) {
    + *dma_handle = dma_ops->map_simple(dev, virt_to_phys(ret),
    + size,
    + PCI_DMA_BIDIRECTIONAL);
    + if (*dma_handle != bad_dma_address)
    + return ret;
    }

    - return ret;
    + if (panic_on_overflow)
    + panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
    + (unsigned long)size);
    + free_pages((unsigned long)ret, get_order(size));
    + return NULL;
    }
    EXPORT_SYMBOL(dma_alloc_coherent);

    --
    1.5.0.6
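
    A greatly simplified user-space sketch of the flow this hunk gives
    dma_alloc_coherent() may help: return the plain allocation when no MMU/IOMMU
    remapping is needed, otherwise release it and defer to the ops table. All
    toy_* names below are invented for illustration; this is an analogue of the
    idea, not the kernel code itself.

        /* Hypothetical analogue of the patched allocation flow: a direct
         * allocation is returned as-is when no MMU/IOMMU remapping is needed,
         * otherwise we fall back to an ops table. */
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct toy_dma_ops {
                void *(*alloc_coherent)(size_t size, uint64_t *handle);
        };

        /* Stand-in for an IOMMU backend's alloc_coherent. */
        static void *iommu_alloc_coherent(size_t size, uint64_t *handle)
        {
                void *p = calloc(1, size);

                *handle = (uintptr_t)p;         /* pretend the IOMMU remapped it */
                return p;
        }

        static struct toy_dma_ops toy_dma_ops = {
                .alloc_coherent = iommu_alloc_coherent,
        };

        static void *toy_alloc_coherent(size_t size, uint64_t mask, uint64_t *handle)
        {
                void *ret = malloc(size);
                uint64_t bus = (uintptr_t)ret;          /* stand-in for page_to_phys() */
                int mmu = ret && (bus + size) >= mask;  /* too high: needs remapping */

                if (ret) {
                        memset(ret, 0, size);
                        if (!mmu) {             /* the early return this patch adds */
                                *handle = bus;
                                return ret;
                        }
                        free(ret);              /* unusable directly; try the ops */
                }
                if (toy_dma_ops.alloc_coherent)
                        return toy_dma_ops.alloc_coherent(size, handle);
                return NULL;
        }

        int main(void)
        {
                uint64_t handle;
                void *buf = toy_alloc_coherent(4096, UINT64_MAX, &handle);

                printf("buf=%p handle=%#llx\n", buf, (unsigned long long)handle);
                free(buf);
                return 0;
        }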


  2. [PATCH 12/28] x86: move x86_64-specific to common code.

    This patch moves the bootmem functions, which are largely x86_64-specific,
    into pci-dma.c. The code goes inside a CONFIG_X86_64 ifdef.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma.c | 73 ++++++++++++++++++++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_64.c | 68 ---------------------------------------
    2 files changed, 73 insertions(+), 68 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index e81e16f..f6d6a92 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -1,7 +1,10 @@
    #include
    #include
    #include
    +#include

    +#include
    +#include
    #include
    #include

    @@ -66,3 +69,73 @@ static __devinit void via_no_dac(struct pci_dev *dev)
    }
    DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
    #endif
    +
    +#ifdef CONFIG_X86_64
    +static __initdata void *dma32_bootmem_ptr;
    +static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
    +
    +static int __init parse_dma32_size_opt(char *p)
    +{
    + if (!p)
    + return -EINVAL;
    + dma32_bootmem_size = memparse(p, &p);
    + return 0;
    +}
    +early_param("dma32_size", parse_dma32_size_opt);
    +
    +void __init dma32_reserve_bootmem(void)
    +{
    + unsigned long size, align;
    + if (end_pfn <= MAX_DMA32_PFN)
    + return;
    +
    + align = 64ULL<<20;
    + size = round_up(dma32_bootmem_size, align);
    + dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
    + __pa(MAX_DMA_ADDRESS));
    + if (dma32_bootmem_ptr)
    + dma32_bootmem_size = size;
    + else
    + dma32_bootmem_size = 0;
    +}
    +static void __init dma32_free_bootmem(void)
    +{
    + int node;
    +
    + if (end_pfn <= MAX_DMA32_PFN)
    + return;
    +
    + if (!dma32_bootmem_ptr)
    + return;
    +
    + for_each_online_node(node)
    + free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
    + dma32_bootmem_size);
    +
    + dma32_bootmem_ptr = NULL;
    + dma32_bootmem_size = 0;
    +}
    +
    +void __init pci_iommu_alloc(void)
    +{
    + /* free the range so iommu could get some range less than 4G */
    + dma32_free_bootmem();
    + /*
    + * The order of these functions is important for
    + * fall-back/fail-over reasons
    + */
    +#ifdef CONFIG_GART_IOMMU
    + gart_iommu_hole_init();
    +#endif
    +
    +#ifdef CONFIG_CALGARY_IOMMU
    + detect_calgary();
    +#endif
    +
    + detect_intel_iommu();
    +
    +#ifdef CONFIG_SWIOTLB
    + pci_swiotlb_init();
    +#endif
    +}
    +#endif
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index e194460..7820675 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -268,71 +268,3 @@ static __init int iommu_setup(char *p)
    return 0;
    }
    early_param("iommu", iommu_setup);
    -
    -static __initdata void *dma32_bootmem_ptr;
    -static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
    -
    -static int __init parse_dma32_size_opt(char *p)
    -{
    - if (!p)
    - return -EINVAL;
    - dma32_bootmem_size = memparse(p, &p);
    - return 0;
    -}
    -early_param("dma32_size", parse_dma32_size_opt);
    -
    -void __init dma32_reserve_bootmem(void)
    -{
    - unsigned long size, align;
    - if (end_pfn <= MAX_DMA32_PFN)
    - return;
    -
    - align = 64ULL<<20;
    - size = round_up(dma32_bootmem_size, align);
    - dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
    - __pa(MAX_DMA_ADDRESS));
    - if (dma32_bootmem_ptr)
    - dma32_bootmem_size = size;
    - else
    - dma32_bootmem_size = 0;
    -}
    -static void __init dma32_free_bootmem(void)
    -{
    - int node;
    -
    - if (end_pfn <= MAX_DMA32_PFN)
    - return;
    -
    - if (!dma32_bootmem_ptr)
    - return;
    -
    - for_each_online_node(node)
    - free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
    - dma32_bootmem_size);
    -
    - dma32_bootmem_ptr = NULL;
    - dma32_bootmem_size = 0;
    -}
    -
    -void __init pci_iommu_alloc(void)
    -{
    - /* free the range so iommu could get some range less than 4G */
    - dma32_free_bootmem();
    - /*
    - * The order of these functions is important for
    - * fall-back/fail-over reasons
    - */
    -#ifdef CONFIG_GART_IOMMU
    - gart_iommu_hole_init();
    -#endif
    -
    -#ifdef CONFIG_CALGARY_IOMMU
    - detect_calgary();
    -#endif
    -
    - detect_intel_iommu();
    -
    -#ifdef CONFIG_SWIOTLB
    - pci_swiotlb_init();
    -#endif
    -}
    --
    1.5.0.6
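
    The dma32_size= handling and the 64MB round-up in dma32_reserve_bootmem()
    boil down to a little arithmetic that a short user-space sketch can show.
    toy_memparse() and toy_round_up() below are made-up stand-ins for the
    kernel's memparse() and round_up(), used only to illustrate the behaviour.

        /* Sketch of parsing a "dma32_size=200M"-style value and rounding it
         * up to the 64MB alignment used when reserving the bootmem region.
         * These helpers are simplified stand-ins, not the kernel functions. */
        #include <stdio.h>
        #include <stdlib.h>

        static unsigned long long toy_memparse(const char *p)
        {
                char *end;
                unsigned long long v = strtoull(p, &end, 0);

                switch (*end) {                 /* accept the usual K/M/G suffixes */
                case 'G': case 'g': v <<= 10;   /* fall through */
                case 'M': case 'm': v <<= 10;   /* fall through */
                case 'K': case 'k': v <<= 10;
                }
                return v;
        }

        static unsigned long long toy_round_up(unsigned long long x,
                                               unsigned long long align)
        {
                return (x + align - 1) & ~(align - 1);  /* align: power of two */
        }

        int main(void)
        {
                unsigned long long size = toy_memparse("200M"); /* dma32_size=200M */
                unsigned long long aligned = toy_round_up(size, 64ULL << 20);

                printf("requested %lluM, reserved %lluM\n",
                       size >> 20, aligned >> 20);
                return 0;
        }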


  3. [PATCH 27/28] x86: don't do dma if mask is NULL.

    If the device hasn't provided a DMA mask, abort the allocation.
    Note that we're using a fallback device now, so this does not cover
    the case of a NULL device: it only catches drivers passing NULL masks around.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 3 +++
    1 files changed, 3 insertions(+), 0 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index f134de3..d2f7074 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -91,6 +91,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    if (dma_mask == 0)
    dma_mask = DMA_32BIT_MASK;

    + if (dev->dma_mask == NULL)
    + return NULL;
    +
    /* Don't invoke OOM killer */
    gfp |= __GFP_NORETRY;
    again:
    --
    1.5.0.6
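
    A toy illustration of the guard being added: since a fallback device is in
    place, "dev" itself is never NULL by this point, so the remaining bad case
    is a driver that left dev->dma_mask unset. The struct and helper names below
    are invented for the example.

        /* Sketch of bailing out early when the device carries no DMA mask. */
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct toy_device {
                uint64_t *dma_mask;             /* NULL: driver never set one */
                uint64_t coherent_dma_mask;
        };

        static void *toy_alloc_coherent(struct toy_device *dev, size_t size)
        {
                uint64_t dma_mask = dev->coherent_dma_mask;

                if (dma_mask == 0)
                        dma_mask = 0xffffffffULL;  /* DMA_32BIT_MASK default */

                if (dev->dma_mask == NULL)         /* the new early bail-out */
                        return NULL;

                (void)dma_mask;                    /* mask checks omitted here */
                return calloc(1, size);
        }

        int main(void)
        {
                uint64_t mask = 0xffffffffULL;
                struct toy_device broken = { .dma_mask = NULL };
                struct toy_device ok = { .dma_mask = &mask,
                                         .coherent_dma_mask = mask };
                void *p = toy_alloc_coherent(&ok, 64);

                printf("broken: %p\n", toy_alloc_coherent(&broken, 64));
                printf("ok:     %p\n", p);
                free(p);
                return 0;
        }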


  4. [PATCH 23/28] x86: don't try to allocate from DMA zone at first

    If the first attempt fails, we loop back into the allocation
    and retry from the DMA zone.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 3 ---
    1 files changed, 0 insertions(+), 3 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 0e9ec11..11f100a 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -84,9 +84,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
    return ret;

    - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
    - gfp |= GFP_DMA;
    -
    if (!dev)
    dev = &fallback_dev;

    --
    1.5.0.6
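
    The retry pattern this patch relies on can be shown with a small user-space
    sketch: the first attempt no longer forces GFP_DMA, and only when the result
    does not fit the device mask do we loop back and retry from the DMA zone.
    The toy_* names and the fake zone flag are invented for illustration.

        /* Sketch of "try without GFP_DMA first, retry from the DMA zone on
         * overflow". The pretend allocator returns a low address only when
         * asked for the DMA zone. */
        #include <stdint.h>
        #include <stdio.h>

        #define TOY_GFP_DMA 0x1                 /* stand-in for GFP_DMA */

        static uint64_t toy_alloc(unsigned flags)
        {
                return (flags & TOY_GFP_DMA) ? 0x00ff0000ULL : 0x1ff000000ULL;
        }

        static uint64_t toy_alloc_coherent(uint64_t dma_mask)
        {
                unsigned gfp = 0;               /* no GFP_DMA on the first pass */
                uint64_t bus;

        again:
                bus = toy_alloc(gfp);
                if (bus + 4096 > dma_mask && !(gfp & TOY_GFP_DMA)) {
                        gfp |= TOY_GFP_DMA;     /* fall back to the DMA zone */
                        goto again;
                }
                return bus;
        }

        int main(void)
        {
                printf("32-bit mask -> bus %#llx\n",
                       (unsigned long long)toy_alloc_coherent(0xffffffffULL));
                printf("24-bit mask -> bus %#llx\n",
                       (unsigned long long)toy_alloc_coherent(0x00ffffffULL));
                return 0;
        }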


  5. [PATCH 19/28] x86: remove virt_to_bus in pci-dma_64.c

    virt_to_bus() is deprecated according to the documentation, and moreover,
    won't return the right thing on i386 when we're dealing with high memory
    mappings. So we make our allocation function return a page, and then use
    page_address() (for the virtual address) and page_to_phys() (for the
    physical address) instead.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_64.c | 14 +++++++-------
    1 files changed, 7 insertions(+), 7 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index 5f03e41..13a31a4 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -28,13 +28,11 @@ struct device fallback_dev = {
    noinline static void *
    dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
    {
    - struct page *page;
    int node;

    node = dev_to_node(dev);

    - page = alloc_pages_node(node, gfp, order);
    - return page ? page_address(page) : NULL;
    + return alloc_pages_node(node, gfp, order);
    }

    #define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
    @@ -47,6 +45,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t gfp)
    {
    void *memory;
    + struct page *page;
    unsigned long dma_mask = 0;
    u64 bus;

    @@ -79,13 +78,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp |= GFP_DMA32;

    again:
    - memory = dma_alloc_pages(dev, gfp, get_order(size));
    - if (memory == NULL)
    + page = dma_alloc_pages(dev, gfp, get_order(size));
    + if (page == NULL)
    return NULL;

    {
    int high, mmu;
    - bus = virt_to_bus(memory);
    + bus = page_to_phys(page);
    + memory = page_address(page);
    high = (bus + size) >= dma_mask;
    mmu = high;
    if (force_iommu && !(gfp & GFP_DMA))
    @@ -112,7 +112,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,

    memset(memory, 0, size);
    if (!mmu) {
    - *dma_handle = virt_to_bus(memory);
    + *dma_handle = bus;
    return memory;
    }
    }
    --
    1.5.0.6
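
    The shape of the refactor can be mirrored in user space: the allocator hands
    back a page-like handle, and the caller derives both the CPU address and the
    bus/physical address from that handle instead of converting the CPU address,
    which is exactly what goes wrong for highmem pages that have no permanent
    kernel mapping. Every name below is invented for the example.

        /* Analogue of "return the page, derive both addresses from it". */
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct toy_page {
                void *vaddr;            /* what page_address() would give */
                uint64_t paddr;         /* what page_to_phys() would give */
        };

        static struct toy_page *toy_alloc_pages(size_t size)
        {
                struct toy_page *page = malloc(sizeof(*page));

                if (!page)
                        return NULL;
                page->vaddr = calloc(1, size);
                page->paddr = (uintptr_t)page->vaddr;   /* fake "physical" addr */
                return page;
        }

        int main(void)
        {
                struct toy_page *page = toy_alloc_pages(4096);

                if (!page)
                        return 1;

                /* Both addresses come from the handle; no virt-to-bus step. */
                void *memory = page->vaddr;     /* page_address(page) */
                uint64_t bus = page->paddr;     /* page_to_phys(page) */

                printf("cpu %p, bus %#llx\n", memory, (unsigned long long)bus);
                free(page->vaddr);
                free(page);
                return 0;
        }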


  6. [PATCH 18/28] x86: move bad_dma_address

    bad_dma_address goes to pci-dma.c and is removed from the arch-specific files.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma.c | 2 ++
    arch/x86/kernel/pci-dma_32.c | 4 ----
    arch/x86/kernel/pci-dma_64.c | 2 --
    3 files changed, 2 insertions(+), 6 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index d06d8df..d6734ed 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -35,6 +35,8 @@ int iommu_detected __read_mostly = 0;
    int iommu_bio_merge __read_mostly = 0;
    EXPORT_SYMBOL(iommu_bio_merge);

    +dma_addr_t bad_dma_address __read_mostly = 0;
    +EXPORT_SYMBOL(bad_dma_address);

    int dma_set_mask(struct device *dev, u64 mask)
    {
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 49166a4..5ae3470 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -14,10 +14,6 @@
    #include
    #include

    -/* For i386, we make it point to the NULL address */
    -dma_addr_t bad_dma_address __read_mostly = 0x0;
    -EXPORT_SYMBOL(bad_dma_address);
    -
    static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
    dma_addr_t *dma_handle, void **ret)
    {
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index 6eacd58..5f03e41 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -14,8 +14,6 @@
    #include
    #include

    -dma_addr_t bad_dma_address __read_mostly;
    -EXPORT_SYMBOL(bad_dma_address);

    /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    --
    1.5.0.6


  7. [PATCH 10/28] x86: unify pci-nommu

    Merge pci-base_32.c and pci-nommu_64.c into pci-nommu.c.
    Their code was made the same, so now they can be merged.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/Makefile | 5 +-
    arch/x86/kernel/pci-base_32.c | 60 ------------------------
    arch/x86/kernel/pci-dma.c | 8 +++
    arch/x86/kernel/pci-dma_64.c | 8 ---
    arch/x86/kernel/pci-nommu.c | 100 ++++++++++++++++++++++++++++++++++++++++
    arch/x86/kernel/pci-nommu_64.c | 100 ----------------------------------------
    6 files changed, 110 insertions(+), 171 deletions(-)
    delete mode 100644 arch/x86/kernel/pci-base_32.c
    create mode 100644 arch/x86/kernel/pci-nommu.c
    delete mode 100644 arch/x86/kernel/pci-nommu_64.c

    diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
    index befe901..b2a1358 100644
    --- a/arch/x86/kernel/Makefile
    +++ b/arch/x86/kernel/Makefile
    @@ -25,9 +25,8 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
    obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o
    obj-y += pci-dma_$(BITS).o bootflag.o e820_$(BITS).o
    obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
    -obj-y += alternative.o i8253.o
    -obj-$(CONFIG_X86_64) += pci-nommu_64.o bugs_64.o
    -obj-$(CONFIG_X86_32) += pci-base_32.o
    +obj-y += alternative.o i8253.o pci-nommu.o
    +obj-$(CONFIG_X86_64) += bugs_64.o
    obj-y += tsc_$(BITS).o io_delay.o rtc.o

    obj-y += process.o
    diff --git a/arch/x86/kernel/pci-base_32.c b/arch/x86/kernel/pci-base_32.c
    deleted file mode 100644
    index b44ea51..0000000
    --- a/arch/x86/kernel/pci-base_32.c
    +++ /dev/null
    @@ -1,60 +0,0 @@
    -#include
    -#include
    -#include
    -#include
    -#include
    -
    -static dma_addr_t pci32_map_single(struct device *dev, phys_addr_t ptr,
    - size_t size, int direction)
    -{
    - WARN_ON(size == 0);
    - flush_write_buffers();
    - return ptr;
    -}
    -
    -static int pci32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
    - int nents, int direction)
    -{
    - struct scatterlist *sg;
    - int i;
    -
    - WARN_ON(nents == 0 || sglist[0].length == 0);
    -
    - for_each_sg(sglist, sg, nents, i) {
    - BUG_ON(!sg_page(sg));
    -
    - sg->dma_address = sg_phys(sg);
    - sg->dma_length = sg->length;
    - }
    -
    - flush_write_buffers();
    - return nents;
    -}
    -
    -/* Make sure we keep the same behaviour */
    -static int pci32_map_error(dma_addr_t dma_addr)
    -{
    - return 0;
    -}
    -
    -const struct dma_mapping_ops pci32_dma_ops = {
    - .map_single = pci32_map_single,
    - .unmap_single = NULL,
    - .map_sg = pci32_dma_map_sg,
    - .unmap_sg = NULL,
    - .sync_single_for_cpu = NULL,
    - .sync_single_for_device = NULL,
    - .sync_single_range_for_cpu = NULL,
    - .sync_single_range_for_device = NULL,
    - .sync_sg_for_cpu = NULL,
    - .sync_sg_for_device = NULL,
    - .mapping_error = pci32_map_error,
    -};
    -
    -/* this is temporary */
    -int __init no_iommu_init(void)
    -{
    - dma_ops = &pci32_dma_ops;
    - return 0;
    -}
    -fs_initcall(no_iommu_init);
    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index d30634b..6b77fd8 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -7,6 +7,14 @@
    const struct dma_mapping_ops *dma_ops;
    EXPORT_SYMBOL(dma_ops);

    +#ifdef CONFIG_IOMMU_DEBUG
    +int panic_on_overflow __read_mostly = 1;
    +int force_iommu __read_mostly = 1;
    +#else
    +int panic_on_overflow __read_mostly = 0;
    +int force_iommu __read_mostly = 0;
    +#endif
    +
    int dma_set_mask(struct device *dev, u64 mask)
    {
    if (!dev->dma_mask || !dma_supported(dev, mask))
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index e95f671..4202130 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -27,14 +27,6 @@ EXPORT_SYMBOL(iommu_bio_merge);
    static int iommu_sac_force __read_mostly = 0;

    int no_iommu __read_mostly;
    -#ifdef CONFIG_IOMMU_DEBUG
    -int panic_on_overflow __read_mostly = 1;
    -int force_iommu __read_mostly = 1;
    -#else
    -int panic_on_overflow __read_mostly = 0;
    -int force_iommu __read_mostly= 0;
    -#endif
    -
    /* Set this to 1 if there is a HW IOMMU in the system */
    int iommu_detected __read_mostly = 0;

    diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
    new file mode 100644
    index 0000000..aec43d5
    --- /dev/null
    +++ b/arch/x86/kernel/pci-nommu.c
    @@ -0,0 +1,100 @@
    +/* Fallback functions when the main IOMMU code is not compiled in. This
    + code is roughly equivalent to i386. */
    +#include
    +#include
    +#include
    +#include
    +#include
    +#include
    +
    +#include
    +#include
    +#include
    +
    +static int
    +check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
    +{
    + if (hwdev && bus + size > *hwdev->dma_mask) {
    + if (*hwdev->dma_mask >= DMA_32BIT_MASK)
    + printk(KERN_ERR
    + "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
    + name, (long long)bus, size,
    + (long long)*hwdev->dma_mask);
    + return 0;
    + }
    + return 1;
    +}
    +
    +static dma_addr_t
    +nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
    + int direction)
    +{
    + dma_addr_t bus = paddr;
    + WARN_ON(size == 0);
    + if (!check_addr("map_single", hwdev, bus, size))
    + return bad_dma_address;
    + flush_write_buffers();
    + return bus;
    +}
    +
    +
    +/* Map a set of buffers described by scatterlist in streaming
    + * mode for DMA. This is the scatter-gather version of the
    + * above pci_map_single interface. Here the scatter gather list
    + * elements are each tagged with the appropriate dma address
    + * and length. They are obtained via sg_dma_{address,length}(SG).
    + *
    + * NOTE: An implementation may be able to use a smaller number of
    + * DMA address/length pairs than there are SG table elements.
    + * (for example via virtual mapping capabilities)
    + * The routine returns the number of addr/length pairs actually
    + * used, at most nents.
    + *
    + * Device ownership issues as mentioned above for pci_map_single are
    + * the same here.
    + */
    +static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
    + int nents, int direction)
    +{
    + struct scatterlist *s;
    + int i;
    +
    + WARN_ON(nents == 0 || sg[0].length == 0);
    +
    + for_each_sg(sg, s, nents, i) {
    + BUG_ON(!sg_page(s));
    + s->dma_address = sg_phys(s);
    + if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
    + return 0;
    + s->dma_length = s->length;
    + }
    + flush_write_buffers();
    + return nents;
    +}
    +
    +/* Make sure we keep the same behaviour */
    +static int nommu_mapping_error(dma_addr_t dma_addr)
    +{
    +#ifdef CONFIG_X86_32
    + return 0;
    +#else
    + return (dma_addr == bad_dma_address);
    +#endif
    +}
    +
    +
    +const struct dma_mapping_ops nommu_dma_ops = {
    + .map_single = nommu_map_single,
    + .map_sg = nommu_map_sg,
    + .mapping_error = nommu_mapping_error,
    + .is_phys = 1,
    +};
    +
    +void __init no_iommu_init(void)
    +{
    + if (dma_ops)
    + return;
    +
    + force_iommu = 0; /* no HW IOMMU */
    + dma_ops = &nommu_dma_ops;
    +}
    diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
    deleted file mode 100644
    index 8d036ae..0000000
    --- a/arch/x86/kernel/pci-nommu_64.c
    +++ /dev/null
    @@ -1,100 +0,0 @@
    -/* Fallback functions when the main IOMMU code is not compiled in. This
    - code is roughly equivalent to i386. */
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -
    -#include
    -#include
    -#include
    -
    -static int
    -check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
    -{
    - if (hwdev && bus + size > *hwdev->dma_mask) {
    - if (*hwdev->dma_mask >= DMA_32BIT_MASK)
    - printk(KERN_ERR
    - "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
    - name, (long long)bus, size,
    - (long long)*hwdev->dma_mask);
    - return 0;
    - }
    - return 1;
    -}
    -
    -static dma_addr_t
    -nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
    - int direction)
    -{
    - dma_addr_t bus = paddr;
    - WARN_ON(size == 0);
    - if (!check_addr("map_single", hwdev, bus, size))
    - return bad_dma_address;
    - flush_write_buffers();
    - return bus;
    -}
    -
    -
    -/* Map a set of buffers described by scatterlist in streaming
    - * mode for DMA. This is the scatter-gather version of the
    - * above pci_map_single interface. Here the scatter gather list
    - * elements are each tagged with the appropriate dma address
    - * and length. They are obtained via sg_dma_{address,length}(SG).
    - *
    - * NOTE: An implementation may be able to use a smaller number of
    - * DMA address/length pairs than there are SG table elements.
    - * (for example via virtual mapping capabilities)
    - * The routine returns the number of addr/length pairs actually
    - * used, at most nents.
    - *
    - * Device ownership issues as mentioned above for pci_map_single are
    - * the same here.
    - */
    -static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
    - int nents, int direction)
    -{
    - struct scatterlist *s;
    - int i;
    -
    - WARN_ON(nents == 0 || sg[0].length == 0);
    -
    - for_each_sg(sg, s, nents, i) {
    - BUG_ON(!sg_page(s));
    - s->dma_address = sg_phys(s);
    - if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
    - return 0;
    - s->dma_length = s->length;
    - }
    - flush_write_buffers();
    - return nents;
    -}
    -
    -/* Make sure we keep the same behaviour */
    -static int nommu_mapping_error(dma_addr_t dma_addr)
    -{
    -#ifdef CONFIG_X86_32
    - return 0;
    -#else
    - return (dma_addr == bad_dma_address);
    -#endif
    -}
    -
    -
    -const struct dma_mapping_ops nommu_dma_ops = {
    - .map_single = nommu_map_single,
    - .map_sg = nommu_map_sg,
    - .mapping_error = nommu_mapping_error,
    - .is_phys = 1,
    -};
    -
    -void __init no_iommu_init(void)
    -{
    - if (dma_ops)
    - return;
    -
    - force_iommu = 0; /* no HW IOMMU */
    - dma_ops = &nommu_dma_ops;
    -}
    --
    1.5.0.6
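
    The unified file's core pattern, an ops table whose map_single is a 1:1
    mapping guarded by a bounds check against the device mask, can be sketched
    in a runnable user-space form. Only the shape mirrors nommu_dma_ops; the
    toy_* names and the zero "bad address" sentinel are assumptions made for
    the example.

        /* Sketch of the nommu-style ops table with a check_addr()-like guard. */
        #include <stdint.h>
        #include <stdio.h>

        #define TOY_BAD_DMA_ADDRESS 0ULL

        struct toy_device {
                uint64_t dma_mask;
        };

        struct toy_dma_ops {
                uint64_t (*map_single)(struct toy_device *dev, uint64_t paddr,
                                       size_t size);
                int (*mapping_error)(uint64_t dma_addr);
        };

        static int check_addr(const char *name, struct toy_device *dev,
                              uint64_t bus, size_t size)
        {
                if (bus + size > dev->dma_mask) {
                        fprintf(stderr,
                                "nommu_%s: overflow %#llx+%zu of device mask %#llx\n",
                                name, (unsigned long long)bus, size,
                                (unsigned long long)dev->dma_mask);
                        return 0;
                }
                return 1;
        }

        static uint64_t toy_map_single(struct toy_device *dev, uint64_t paddr,
                                       size_t size)
        {
                if (!check_addr("map_single", dev, paddr, size))
                        return TOY_BAD_DMA_ADDRESS;
                return paddr;           /* identity mapping: no IOMMU involved */
        }

        static int toy_mapping_error(uint64_t dma_addr)
        {
                return dma_addr == TOY_BAD_DMA_ADDRESS;
        }

        static const struct toy_dma_ops toy_nommu_ops = {
                .map_single = toy_map_single,
                .mapping_error = toy_mapping_error,
        };

        int main(void)
        {
                struct toy_device dev = { .dma_mask = 0xffffffffULL };
                uint64_t ok = toy_nommu_ops.map_single(&dev, 0x1000, 4096);
                uint64_t bad = toy_nommu_ops.map_single(&dev, 0x1ffff0000ULL, 4096);

                printf("ok=%#llx (error=%d), bad=%#llx (error=%d)\n",
                       (unsigned long long)ok, toy_nommu_ops.mapping_error(ok),
                       (unsigned long long)bad, toy_nommu_ops.mapping_error(bad));
                return 0;
        }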


  8. [PATCH 20/28] x86: use numa allocation function in i386

    We can do it here too, in the same way x86_64 does.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 27 ++++++++++++++++++++++-----
    1 files changed, 22 insertions(+), 5 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 5ae3470..0d630ae 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -48,10 +48,23 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
    return 0;
    }

    +/* Allocate DMA memory on node near device */
    +noinline struct page *
    +dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
    +{
    + int node;
    +
    + node = dev_to_node(dev);
    +
    + return alloc_pages_node(node, gfp, order);
    +}
    +
    void *dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t gfp)
    {
    void *ret = NULL;
    + struct page *page;
    + dma_addr_t bus;
    int order = get_order(size);
    /* ignore region specifiers */
    gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
    @@ -62,12 +75,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
    gfp |= GFP_DMA;

    - ret = (void *)__get_free_pages(gfp, order);
    + page = dma_alloc_pages(dev, gfp, order);
    + if (page == NULL)
    + return NULL;
    +
    + ret = page_address(page);
    + bus = page_to_phys(page);
    +
    + memset(ret, 0, size);
    + *dma_handle = bus;

    - if (ret != NULL) {
    - memset(ret, 0, size);
    - *dma_handle = virt_to_phys(ret);
    - }
    return ret;
    }
    EXPORT_SYMBOL(dma_alloc_coherent);
    --
    1.5.0.6
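
    A minimal user-space illustration of the dev_to_node()/alloc_pages_node()
    pattern adopted here: resolve the device's NUMA node first, then allocate
    for that node, falling back to "any node" when it is unknown. The helpers
    below are stand-ins, not kernel APIs.

        /* Sketch of node-aware allocation dispatch, with invented helpers. */
        #include <stdio.h>
        #include <stdlib.h>

        struct toy_device {
                int numa_node;                  /* -1 means "no node information" */
        };

        static int toy_dev_to_node(const struct toy_device *dev)
        {
                return dev ? dev->numa_node : -1;
        }

        static void *toy_alloc_pages_node(int node, size_t size)
        {
                /* In the kernel this would pick the zonelist for "node";
                 * here we just report which node was requested. */
                printf("allocating %zu bytes from node %d\n", size, node);
                return calloc(1, size);
        }

        int main(void)
        {
                struct toy_device dev = { .numa_node = 1 };
                void *buf = toy_alloc_pages_node(toy_dev_to_node(&dev), 4096);

                free(buf);
                return 0;
        }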

