[PATCH 0/28] integrate dma_ops - Kernel

This is a discussion on [PATCH 0/28] integrate dma_ops - Kernel ; Hi, This is the final integration of dma_ops between x86_64 and i386. The final code is closer to x86_64 than to i386, which is obviously expected. At the end, pci-dma_{32,64}.c are gone, pci-nommu_64.c is gone, and the temporary pci-base_32.c is ...

+ Reply to Thread
Page 1 of 2 1 2 LastLast
Results 1 to 20 of 28

Thread: [PATCH 0/28] integrate dma_ops

  1. [PATCH 0/28] integrate dma_ops

    Hi,

    This is the final integration of dma_ops between x86_64 and i386.
    The final code is closer to x86_64 than to i386, which is obviously expected.

    At the end, pci-dma_{32,64}.c are gone, pci-nommu_64.c is gone, and the temporary
    pci-base_32.c is gone too.

    This patchset received the same level of scrutiny as the others from my side:
    compiled tested in at least 6 different random configs, boot tested in my hardware.

    The final diffstat says:

    Documentation/feature-removal-schedule.txt | 7
    arch/x86/kernel/Makefile | 9
    arch/x86/kernel/pci-base_32.c | 72 ---
    arch/x86/kernel/pci-dma.c | 524 +++++++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 503 +++++++--------------------
    arch/x86/kernel/pci-dma_64.c | 443 +-----------------------
    arch/x86/kernel/pci-nommu.c | 100 +++++
    arch/x86/kernel/pci-nommu_64.c | 140 -------
    arch/x86/mm/init_64.c | 4
    include/asm-x86/dma-mapping.h | 14
    include/asm-x86/scatterlist.h | 3
    11 files changed, 832 insertions(+), 987 deletions(-)


    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  2. [PATCH 02/28] x86: delete empty functions from pci-nommu_64.c

    These functions are now called conditionally on their
    existence in the struct. So just delete them, instead
    of keeping an empty implementation.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-nommu_64.c | 15 ---------------
    1 files changed, 0 insertions(+), 15 deletions(-)

    diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
    index 6e33076..90a7c40 100644
    --- a/arch/x86/kernel/pci-nommu_64.c
    +++ b/arch/x86/kernel/pci-nommu_64.c
    @@ -35,10 +35,6 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
    return bus;
    }

    -static void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
    - int direction)
    -{
    -}

    /* Map a set of buffers described by scatterlist in streaming
    * mode for DMA. This is the scatter-gather version of the
    @@ -71,20 +67,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
    return nents;
    }

    -/* Unmap a set of streaming mode DMA translations.
    - * Again, cpu read rules concerning calls here are the same as for
    - * pci_unmap_single() above.
    - */
    -static void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
    - int nents, int dir)
    -{
    -}
    -
    const struct dma_mapping_ops nommu_dma_ops = {
    .map_single = nommu_map_single,
    - .unmap_single = nommu_unmap_single,
    .map_sg = nommu_map_sg,
    - .unmap_sg = nommu_unmap_sg,
    .is_phys = 1,
    };

    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  3. [PATCH 07/28] x86: use WARN_ON in mapping functions

    In the very same way i386 does, we use WARN_ON functions
    in map_single and map_sg.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-nommu_64.c | 3 +++
    1 files changed, 3 insertions(+), 0 deletions(-)

    diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
    index c6901e7..8d036ae 100644
    --- a/arch/x86/kernel/pci-nommu_64.c
    +++ b/arch/x86/kernel/pci-nommu_64.c
    @@ -30,6 +30,7 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
    int direction)
    {
    dma_addr_t bus = paddr;
    + WARN_ON(size == 0);
    if (!check_addr("map_single", hwdev, bus, size))
    return bad_dma_address;
    flush_write_buffers();
    @@ -58,6 +59,8 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
    struct scatterlist *s;
    int i;

    + WARN_ON(nents == 0 || sg[0].length == 0);
    +
    for_each_sg(sg, s, nents, i) {
    BUG_ON(!sg_page(s));
    s->dma_address = sg_phys(s);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  4. [PATCH 01/28] x86: introduce pci-dma.c

    This patch introduces pci-dma.c, a common file for pci dma
    between i386 and x86_64. As a start, dma_set_mask() is the same
    between architectures, and is placed there.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/Makefile | 2 +-
    arch/x86/kernel/pci-dma.c | 14 ++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 12 ------------
    arch/x86/kernel/pci-dma_64.c | 9 ---------
    4 files changed, 15 insertions(+), 22 deletions(-)
    create mode 100644 arch/x86/kernel/pci-dma.c

    diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
    index 53c8fa4..befe901 100644
    --- a/arch/x86/kernel/Makefile
    +++ b/arch/x86/kernel/Makefile
    @@ -24,7 +24,7 @@ obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
    obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
    obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o
    obj-y += pci-dma_$(BITS).o bootflag.o e820_$(BITS).o
    -obj-y += quirks.o i8237.o topology.o kdebugfs.o
    +obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
    obj-y += alternative.o i8253.o
    obj-$(CONFIG_X86_64) += pci-nommu_64.o bugs_64.o
    obj-$(CONFIG_X86_32) += pci-base_32.o
    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    new file mode 100644
    index 0000000..f1c24d8
    --- /dev/null
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -0,0 +1,14 @@
    +#include
    +
    +int dma_set_mask(struct device *dev, u64 mask)
    +{
    + if (!dev->dma_mask || !dma_supported(dev, mask))
    + return -EIO;
    +
    + *dev->dma_mask = mask;
    +
    + return 0;
    +}
    +EXPORT_SYMBOL(dma_set_mask);
    +
    +
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index be6b1f6..9e82976 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -182,18 +182,6 @@ dma_supported(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_supported);

    -int
    -dma_set_mask(struct device *dev, u64 mask)
    -{
    - if (!dev->dma_mask || !dma_supported(dev, mask))
    - return -EIO;
    -
    - *dev->dma_mask = mask;
    -
    - return 0;
    -}
    -EXPORT_SYMBOL(dma_set_mask);
    -

    static __devinit void via_no_dac(struct pci_dev *dev)
    {
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index f97a08d..e697b86 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -213,15 +213,6 @@ int dma_supported(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_supported);

    -int dma_set_mask(struct device *dev, u64 mask)
    -{
    - if (!dev->dma_mask || !dma_supported(dev, mask))
    - return -EIO;
    - *dev->dma_mask = mask;
    - return 0;
    -}
    -EXPORT_SYMBOL(dma_set_mask);
    -
    /*
    * See for the iommu kernel parameter
    * documentation.
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  5. [PATCH 13/28] x86: merge dma_supported

    The code for both arches are very similar, so this patch merge them.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma.c | 44 ++++++++++++++++++++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 24 ----------------------
    arch/x86/kernel/pci-dma_64.c | 44 +-----------------------------------------
    3 files changed, 45 insertions(+), 67 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index f6d6a92..4289a9b 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -14,6 +14,8 @@ EXPORT_SYMBOL(forbid_dac);
    const struct dma_mapping_ops *dma_ops;
    EXPORT_SYMBOL(dma_ops);

    +int iommu_sac_force __read_mostly = 0;
    +
    #ifdef CONFIG_IOMMU_DEBUG
    int panic_on_overflow __read_mostly = 1;
    int force_iommu __read_mostly = 1;
    @@ -33,6 +35,48 @@ int dma_set_mask(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_set_mask);

    +int dma_supported(struct device *dev, u64 mask)
    +{
    +#ifdef CONFIG_PCI
    + if (mask > 0xffffffff && forbid_dac > 0) {
    + printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
    + dev->bus_id);
    + return 0;
    + }
    +#endif
    +
    + if (dma_ops->dma_supported)
    + return dma_ops->dma_supported(dev, mask);
    +
    + /* Copied from i386. Doesn't make much sense, because it will
    + only work for pci_alloc_coherent.
    + The caller just has to use GFP_DMA in this case. */
    + if (mask < DMA_24BIT_MASK)
    + return 0;
    +
    + /* Tell the device to use SAC when IOMMU force is on. This
    + allows the driver to use cheaper accesses in some cases.
    +
    + Problem with this is that if we overflow the IOMMU area and
    + return DAC as fallback address the device may not handle it
    + correctly.
    +
    + As a special case some controllers have a 39bit address
    + mode that is as efficient as 32bit (aic79xx). Don't force
    + SAC for these. Assume all masks <= 40 bits are of this
    + type. Normally this doesn't make any difference, but gives
    + more gentle handling of IOMMU overflow. */
    + if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
    + printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
    + dev->bus_id, mask);
    + return 0;
    + }
    +
    + return 1;
    +}
    +EXPORT_SYMBOL(dma_supported);
    +
    +
    static int __init pci_iommu_init(void)
    {
    #ifdef CONFIG_CALGARY_IOMMU
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 6543bb3..1d4091a 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -155,30 +155,6 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
    EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

    #ifdef CONFIG_PCI
    -/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
    -
    -int
    -dma_supported(struct device *dev, u64 mask)
    -{
    - /*
    - * we fall back to GFP_DMA when the mask isn't all 1s,
    - * so we can't guarantee allocations that must be
    - * within a tighter range than GFP_DMA..
    - */
    - if (mask < 0x00ffffff)
    - return 0;
    -
    - /* Work around chipset bugs */
    - if (forbid_dac > 0 && mask > 0xffffffffULL)
    - return 0;
    -
    - if (dma_ops->dma_supported)
    - return dma_ops->dma_supported(dev, mask);
    -
    - return 1;
    -}
    -EXPORT_SYMBOL(dma_supported);
    -
    static int check_iommu(char *s)
    {
    if (!strcmp(s, "usedac")) {
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index 7820675..c80da76 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -24,7 +24,7 @@ EXPORT_SYMBOL(bad_dma_address);
    int iommu_bio_merge __read_mostly = 0;
    EXPORT_SYMBOL(iommu_bio_merge);

    -static int iommu_sac_force __read_mostly = 0;
    +extern int iommu_sac_force;

    int no_iommu __read_mostly;
    /* Set this to 1 if there is a HW IOMMU in the system */
    @@ -161,48 +161,6 @@ void dma_free_coherent(struct device *dev, size_t size,
    }
    EXPORT_SYMBOL(dma_free_coherent);

    -int dma_supported(struct device *dev, u64 mask)
    -{
    -#ifdef CONFIG_PCI
    - if (mask > 0xffffffff && forbid_dac > 0) {
    -
    -
    -
    - printk(KERN_INFO "PCI: Disallowing DAC for device %s\n", dev->bus_id);
    - return 0;
    - }
    -#endif
    -
    - if (dma_ops->dma_supported)
    - return dma_ops->dma_supported(dev, mask);
    -
    - /* Copied from i386. Doesn't make much sense, because it will
    - only work for pci_alloc_coherent.
    - The caller just has to use GFP_DMA in this case. */
    - if (mask < DMA_24BIT_MASK)
    - return 0;
    -
    - /* Tell the device to use SAC when IOMMU force is on. This
    - allows the driver to use cheaper accesses in some cases.
    -
    - Problem with this is that if we overflow the IOMMU area and
    - return DAC as fallback address the device may not handle it
    - correctly.
    -
    - As a special case some controllers have a 39bit address
    - mode that is as efficient as 32bit (aic79xx). Don't force
    - SAC for these. Assume all masks <= 40 bits are of this
    - type. Normally this doesn't make any difference, but gives
    - more gentle handling of IOMMU overflow. */
    - if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
    - printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
    - return 0;
    - }
    -
    - return 1;
    -}
    -EXPORT_SYMBOL(dma_supported);
    -
    /*
    * See for the iommu kernel parameter
    * documentation.
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  6. [PATCH 04/28] x86: Add flush_write_buffers in nommu functions

    This patch adds flush_write_buffers() in some functions of pci-nommu_64.c
    They are added anywhere i386 would also have it. This is not a problem
    for x86_64, since flush_write_buffers() is a nop for it.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-nommu_64.c | 2 ++
    1 files changed, 2 insertions(+), 0 deletions(-)

    diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
    index a4e8ccf..1da9cf9 100644
    --- a/arch/x86/kernel/pci-nommu_64.c
    +++ b/arch/x86/kernel/pci-nommu_64.c
    @@ -32,6 +32,7 @@ nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
    dma_addr_t bus = paddr;
    if (!check_addr("map_single", hwdev, bus, size))
    return bad_dma_address;
    + flush_write_buffers();
    return bus;
    }

    @@ -64,6 +65,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
    return 0;
    s->dma_length = s->length;
    }
    + flush_write_buffers();
    return nents;
    }

    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  7. [PATCH 06/28] x86: use dma_length in i386

    This is done to get the code closer to x86_64.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-base_32.c | 1 +
    include/asm-x86/scatterlist.h | 2 --
    2 files changed, 1 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/kernel/pci-base_32.c b/arch/x86/kernel/pci-base_32.c
    index 7caf5c2..837bbe9 100644
    --- a/arch/x86/kernel/pci-base_32.c
    +++ b/arch/x86/kernel/pci-base_32.c
    @@ -24,6 +24,7 @@ static int pci32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
    BUG_ON(!sg_page(sg));

    sg->dma_address = sg_phys(sg);
    + sg->dma_length = sg->length;
    }

    flush_write_buffers();
    diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
    index d13c197..c043206 100644
    --- a/include/asm-x86/scatterlist.h
    +++ b/include/asm-x86/scatterlist.h
    @@ -11,9 +11,7 @@ struct scatterlist {
    unsigned int offset;
    unsigned int length;
    dma_addr_t dma_address;
    -#ifdef CONFIG_X86_64
    unsigned int dma_length;
    -#endif
    };

    #define ARCH_HAS_SG_CHAIN
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  8. [PATCH 22/28] x86: use a fallback dev for i386

    We can use a fallback dev for cases of a NULL device being passed (mostly ISA)
    This comes from x86_64 implementation.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 13 +++++++++++++
    1 files changed, 13 insertions(+), 0 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index f6cf434..0e9ec11 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -14,6 +14,16 @@
    #include
    #include

    +/* Dummy device used for NULL arguments (normally ISA). Better would
    + be probably a smaller DMA mask, but this is bug-to-bug compatible
    + to i386. */
    +struct device fallback_dev = {
    + .bus_id = "fallback device",
    + .coherent_dma_mask = DMA_32BIT_MASK,
    + .dma_mask = &fallback_dev.coherent_dma_mask,
    +};
    +
    +
    static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
    dma_addr_t *dma_handle, void **ret)
    {
    @@ -77,6 +87,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
    gfp |= GFP_DMA;

    + if (!dev)
    + dev = &fallback_dev;
    +
    dma_mask = dev->coherent_dma_mask;
    if (dma_mask == 0)
    dma_mask = DMA_32BIT_MASK;
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  9. [PATCH 11/28] x86: move pci fixup to pci-dma.c

    via_no_dac provides a fixup that is the same for both
    architectures. Move it to pci-dma.c.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma.c | 18 ++++++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 13 -------------
    arch/x86/kernel/pci-dma_64.c | 15 ---------------
    include/asm-x86/dma-mapping.h | 2 +-
    4 files changed, 19 insertions(+), 29 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index 6b77fd8..e81e16f 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -1,9 +1,13 @@
    #include
    #include
    +#include

    #include
    #include

    +int forbid_dac __read_mostly;
    +EXPORT_SYMBOL(forbid_dac);
    +
    const struct dma_mapping_ops *dma_ops;
    EXPORT_SYMBOL(dma_ops);

    @@ -48,3 +52,17 @@ void pci_iommu_shutdown(void)
    }
    /* Must execute after PCI subsystem */
    fs_initcall(pci_iommu_init);
    +
    +#ifdef CONFIG_PCI
    +/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
    +
    +static __devinit void via_no_dac(struct pci_dev *dev)
    +{
    + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
    + printk(KERN_INFO "PCI: VIA PCI bridge detected."
    + "Disabling DAC.\n");
    + forbid_dac = 1;
    + }
    +}
    +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
    +#endif
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 9e82976..6543bb3 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -157,9 +157,6 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
    #ifdef CONFIG_PCI
    /* Many VIA bridges seem to corrupt data for DAC. Disable it here */

    -int forbid_dac;
    -EXPORT_SYMBOL(forbid_dac);
    -
    int
    dma_supported(struct device *dev, u64 mask)
    {
    @@ -182,16 +179,6 @@ dma_supported(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_supported);

    -
    -static __devinit void via_no_dac(struct pci_dev *dev)
    -{
    - if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
    - printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
    - forbid_dac = 1;
    - }
    -}
    -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
    -
    static int check_iommu(char *s)
    {
    if (!strcmp(s, "usedac")) {
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index 4202130..e194460 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -161,8 +161,6 @@ void dma_free_coherent(struct device *dev, size_t size,
    }
    EXPORT_SYMBOL(dma_free_coherent);

    -static int forbid_dac __read_mostly;
    -
    int dma_supported(struct device *dev, u64 mask)
    {
    #ifdef CONFIG_PCI
    @@ -338,16 +336,3 @@ void __init pci_iommu_alloc(void)
    pci_swiotlb_init();
    #endif
    }
    -
    -#ifdef CONFIG_PCI
    -/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
    -
    -static __devinit void via_no_dac(struct pci_dev *dev)
    -{
    - if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
    - printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
    - forbid_dac = 1;
    - }
    -}
    -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
    -#endif
    diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
    index 914846d..d82517d 100644
    --- a/include/asm-x86/dma-mapping.h
    +++ b/include/asm-x86/dma-mapping.h
    @@ -14,6 +14,7 @@ extern dma_addr_t bad_dma_address;
    extern int iommu_merge;
    extern struct device fallback_dev;
    extern int panic_on_overflow;
    +extern int forbid_dac;

    struct dma_mapping_ops {
    int (*mapping_error)(dma_addr_t dma_addr);
    @@ -223,6 +224,5 @@ dma_release_declared_memory(struct device *dev);
    extern void *
    dma_mark_declared_memory_occupied(struct device *dev,
    dma_addr_t device_addr, size_t size);
    -extern int forbid_dac;
    #endif /* CONFIG_X86_32 */
    #endif
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  10. [PATCH 25/28] x86: remove kludge from x86_64

    The claim is that i386 does it. But it does not.
    So remove it.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_64.c | 4 ----
    1 files changed, 0 insertions(+), 4 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index b956f59..596c8c8 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -68,10 +68,6 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    /* Don't invoke OOM killer */
    gfp |= __GFP_NORETRY;

    - /* Kludge to make it bug-to-bug compatible with i386. i386
    - uses the normal dma_mask for alloc_coherent. */
    - dma_mask &= *dev->dma_mask;
    -
    /* Why <=? Even when the mask is smaller than 4GB it is often
    larger than 16MB and in this case we have a chance of
    finding fitting memory in the next higher zone first. If
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  11. [PATCH 14/28] x86: merge iommu initialization parameters

    We merge the iommu initialization parameters in pci-dma.c.
    Nicely, both architectures at least recognize the same
    parameters.

    usedac i386 parameter is marked for deprecation

    Signed-off-by: Glauber Costa
    ---
    Documentation/feature-removal-schedule.txt | 7 +++
    arch/x86/kernel/pci-dma.c | 81 ++++++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 12 ----
    arch/x86/kernel/pci-dma_64.c | 79 ---------------------------
    include/asm-x86/dma-mapping.h | 1 +
    5 files changed, 89 insertions(+), 91 deletions(-)

    diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
    index 1092b2e..537c88b 100644
    --- a/Documentation/feature-removal-schedule.txt
    +++ b/Documentation/feature-removal-schedule.txt
    @@ -306,3 +306,10 @@ Why: Not used in-tree. The current out-of-tree users used it to
    code / infrastructure should be in the kernel and not in some
    out-of-tree driver.
    Who: Thomas Gleixner
    +
    +----------------------------
    +
    +What: usedac i386 kernel parameter
    +When: 2.6.27
    +Why: replaced by allowdac and no dac combination
    +Who: Glauber Costa
    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index 4289a9b..e04f42c 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -24,6 +24,18 @@ int panic_on_overflow __read_mostly = 0;
    int force_iommu __read_mostly = 0;
    #endif

    +int iommu_merge __read_mostly = 0;
    +
    +int no_iommu __read_mostly;
    +/* Set this to 1 if there is a HW IOMMU in the system */
    +int iommu_detected __read_mostly = 0;
    +
    +/* This tells the BIO block layer to assume merging. Default to off
    + because we cannot guarantee merging later. */
    +int iommu_bio_merge __read_mostly = 0;
    +EXPORT_SYMBOL(iommu_bio_merge);
    +
    +
    int dma_set_mask(struct device *dev, u64 mask)
    {
    if (!dev->dma_mask || !dma_supported(dev, mask))
    @@ -183,3 +195,72 @@ void __init pci_iommu_alloc(void)
    #endif
    }
    #endif
    +
    +/*
    + * See for the iommu kernel parameter
    + * documentation.
    + */
    +static __init int iommu_setup(char *p)
    +{
    + iommu_merge = 1;
    +
    + if (!p)
    + return -EINVAL;
    +
    + while (*p) {
    + if (!strncmp(p, "off", 3))
    + no_iommu = 1;
    + /* gart_parse_options has more force support */
    + if (!strncmp(p, "force", 5))
    + force_iommu = 1;
    + if (!strncmp(p, "noforce", 7)) {
    + iommu_merge = 0;
    + force_iommu = 0;
    + }
    +
    + if (!strncmp(p, "biomerge", 8)) {
    + iommu_bio_merge = 4096;
    + iommu_merge = 1;
    + force_iommu = 1;
    + }
    + if (!strncmp(p, "panic", 5))
    + panic_on_overflow = 1;
    + if (!strncmp(p, "nopanic", 7))
    + panic_on_overflow = 0;
    + if (!strncmp(p, "merge", 5)) {
    + iommu_merge = 1;
    + force_iommu = 1;
    + }
    + if (!strncmp(p, "nomerge", 7))
    + iommu_merge = 0;
    + if (!strncmp(p, "forcesac", 8))
    + iommu_sac_force = 1;
    + if (!strncmp(p, "allowdac", 8))
    + forbid_dac = 0;
    + if (!strncmp(p, "nodac", 5))
    + forbid_dac = -1;
    + if (!strncmp(p, "usedac", 6)) {
    + forbid_dac = -1;
    + return 1;
    + }
    +#ifdef CONFIG_SWIOTLB
    + if (!strncmp(p, "soft", 4))
    + swiotlb = 1;
    +#endif
    +
    +#ifdef CONFIG_GART_IOMMU
    + gart_parse_options(p);
    +#endif
    +
    +#ifdef CONFIG_CALGARY_IOMMU
    + if (!strncmp(p, "calgary", 7))
    + use_calgary = 1;
    +#endif /* CONFIG_CALGARY_IOMMU */
    +
    + p += strcspn(p, ",");
    + if (*p == ',')
    + ++p;
    + }
    + return 0;
    +}
    +early_param("iommu", iommu_setup);
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 1d4091a..eea52df 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -153,15 +153,3 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
    return mem->virt_base + (pos << PAGE_SHIFT);
    }
    EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
    -
    -#ifdef CONFIG_PCI
    -static int check_iommu(char *s)
    -{
    - if (!strcmp(s, "usedac")) {
    - forbid_dac = -1;
    - return 1;
    - }
    - return 0;
    -}
    -__setup("iommu=", check_iommu);
    -#endif
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index c80da76..e7d45cf 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -14,22 +14,9 @@
    #include
    #include

    -int iommu_merge __read_mostly = 0;
    -
    dma_addr_t bad_dma_address __read_mostly;
    EXPORT_SYMBOL(bad_dma_address);

    -/* This tells the BIO block layer to assume merging. Default to off
    - because we cannot guarantee merging later. */
    -int iommu_bio_merge __read_mostly = 0;
    -EXPORT_SYMBOL(iommu_bio_merge);
    -
    -extern int iommu_sac_force;
    -
    -int no_iommu __read_mostly;
    -/* Set this to 1 if there is a HW IOMMU in the system */
    -int iommu_detected __read_mostly = 0;
    -
    /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to i386. */
    @@ -160,69 +147,3 @@ void dma_free_coherent(struct device *dev, size_t size,
    free_pages((unsigned long)vaddr, get_order(size));
    }
    EXPORT_SYMBOL(dma_free_coherent);
    -
    -/*
    - * See for the iommu kernel parameter
    - * documentation.
    - */
    -static __init int iommu_setup(char *p)
    -{
    - iommu_merge = 1;
    -
    - if (!p)
    - return -EINVAL;
    -
    - while (*p) {
    - if (!strncmp(p, "off", 3))
    - no_iommu = 1;
    - /* gart_parse_options has more force support */
    - if (!strncmp(p, "force", 5))
    - force_iommu = 1;
    - if (!strncmp(p, "noforce", 7)) {
    - iommu_merge = 0;
    - force_iommu = 0;
    - }
    -
    - if (!strncmp(p, "biomerge", 8)) {
    - iommu_bio_merge = 4096;
    - iommu_merge = 1;
    - force_iommu = 1;
    - }
    - if (!strncmp(p, "panic", 5))
    - panic_on_overflow = 1;
    - if (!strncmp(p, "nopanic", 7))
    - panic_on_overflow = 0;
    - if (!strncmp(p, "merge", 5)) {
    - iommu_merge = 1;
    - force_iommu = 1;
    - }
    - if (!strncmp(p, "nomerge", 7))
    - iommu_merge = 0;
    - if (!strncmp(p, "forcesac", 8))
    - iommu_sac_force = 1;
    - if (!strncmp(p, "allowdac", 8))
    - forbid_dac = 0;
    - if (!strncmp(p, "nodac", 5))
    - forbid_dac = -1;
    -
    -#ifdef CONFIG_SWIOTLB
    - if (!strncmp(p, "soft", 4))
    - swiotlb = 1;
    -#endif
    -
    -#ifdef CONFIG_GART_IOMMU
    - gart_parse_options(p);
    -#endif
    -
    -#ifdef CONFIG_CALGARY_IOMMU
    - if (!strncmp(p, "calgary", 7))
    - use_calgary = 1;
    -#endif /* CONFIG_CALGARY_IOMMU */
    -
    - p += strcspn(p, ",");
    - if (*p == ',')
    - ++p;
    - }
    - return 0;
    -}
    -early_param("iommu", iommu_setup);
    diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
    index d82517d..7580736 100644
    --- a/include/asm-x86/dma-mapping.h
    +++ b/include/asm-x86/dma-mapping.h
    @@ -15,6 +15,7 @@ extern int iommu_merge;
    extern struct device fallback_dev;
    extern int panic_on_overflow;
    extern int forbid_dac;
    +extern int force_iommu;

    struct dma_mapping_ops {
    int (*mapping_error)(dma_addr_t dma_addr);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  12. [PATCH 15/28] x86: move dma_coherent functions to pci-dma.c

    They are placed in an ifdef, since they are i386-specific.
    The structure definition goes to dma-mapping.h.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma.c | 81 +++++++++++++++++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 85 -----------------------------------------
    include/asm-x86/dma-mapping.h | 8 ++++
    3 files changed, 89 insertions(+), 85 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index e04f42c..d06d8df 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -47,6 +47,87 @@ int dma_set_mask(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_set_mask);

    +#ifdef CONFIG_X86_32
    +int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
    + dma_addr_t device_addr, size_t size, int flags)
    +{
    + void __iomem *mem_base = NULL;
    + int pages = size >> PAGE_SHIFT;
    + int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
    +
    + if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
    + goto out;
    + if (!size)
    + goto out;
    + if (dev->dma_mem)
    + goto out;
    +
    + /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
    +
    + mem_base = ioremap(bus_addr, size);
    + if (!mem_base)
    + goto out;
    +
    + dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
    + if (!dev->dma_mem)
    + goto out;
    + dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
    + if (!dev->dma_mem->bitmap)
    + goto free1_out;
    +
    + dev->dma_mem->virt_base = mem_base;
    + dev->dma_mem->device_base = device_addr;
    + dev->dma_mem->size = pages;
    + dev->dma_mem->flags = flags;
    +
    + if (flags & DMA_MEMORY_MAP)
    + return DMA_MEMORY_MAP;
    +
    + return DMA_MEMORY_IO;
    +
    + free1_out:
    + kfree(dev->dma_mem);
    + out:
    + if (mem_base)
    + iounmap(mem_base);
    + return 0;
    +}
    +EXPORT_SYMBOL(dma_declare_coherent_memory);
    +
    +void dma_release_declared_memory(struct device *dev)
    +{
    + struct dma_coherent_mem *mem = dev->dma_mem;
    +
    + if (!mem)
    + return;
    + dev->dma_mem = NULL;
    + iounmap(mem->virt_base);
    + kfree(mem->bitmap);
    + kfree(mem);
    +}
    +EXPORT_SYMBOL(dma_release_declared_memory);
    +
    +void *dma_mark_declared_memory_occupied(struct device *dev,
    + dma_addr_t device_addr, size_t size)
    +{
    + struct dma_coherent_mem *mem = dev->dma_mem;
    + int pos, err;
    + int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
    +
    + pages >>= PAGE_SHIFT;
    +
    + if (!mem)
    + return ERR_PTR(-EINVAL);
    +
    + pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
    + err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
    + if (err != 0)
    + return ERR_PTR(err);
    + return mem->virt_base + (pos << PAGE_SHIFT);
    +}
    +EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
    +#endif /* CONFIG_X86_32 */
    +
    int dma_supported(struct device *dev, u64 mask)
    {
    #ifdef CONFIG_PCI
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index eea52df..818d95e 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -18,14 +18,6 @@
    dma_addr_t bad_dma_address __read_mostly = 0x0;
    EXPORT_SYMBOL(bad_dma_address);

    -struct dma_coherent_mem {
    - void *virt_base;
    - u32 device_base;
    - int size;
    - int flags;
    - unsigned long *bitmap;
    -};
    -
    void *dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t gfp)
    {
    @@ -76,80 +68,3 @@ void dma_free_coherent(struct device *dev, size_t size,
    free_pages((unsigned long)vaddr, order);
    }
    EXPORT_SYMBOL(dma_free_coherent);
    -
    -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
    - dma_addr_t device_addr, size_t size, int flags)
    -{
    - void __iomem *mem_base = NULL;
    - int pages = size >> PAGE_SHIFT;
    - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
    -
    - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
    - goto out;
    - if (!size)
    - goto out;
    - if (dev->dma_mem)
    - goto out;
    -
    - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
    -
    - mem_base = ioremap(bus_addr, size);
    - if (!mem_base)
    - goto out;
    -
    - dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
    - if (!dev->dma_mem)
    - goto out;
    - dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
    - if (!dev->dma_mem->bitmap)
    - goto free1_out;
    -
    - dev->dma_mem->virt_base = mem_base;
    - dev->dma_mem->device_base = device_addr;
    - dev->dma_mem->size = pages;
    - dev->dma_mem->flags = flags;
    -
    - if (flags & DMA_MEMORY_MAP)
    - return DMA_MEMORY_MAP;
    -
    - return DMA_MEMORY_IO;
    -
    - free1_out:
    - kfree(dev->dma_mem);
    - out:
    - if (mem_base)
    - iounmap(mem_base);
    - return 0;
    -}
    -EXPORT_SYMBOL(dma_declare_coherent_memory);
    -
    -void dma_release_declared_memory(struct device *dev)
    -{
    - struct dma_coherent_mem *mem = dev->dma_mem;
    -
    - if(!mem)
    - return;
    - dev->dma_mem = NULL;
    - iounmap(mem->virt_base);
    - kfree(mem->bitmap);
    - kfree(mem);
    -}
    -EXPORT_SYMBOL(dma_release_declared_memory);
    -
    -void *dma_mark_declared_memory_occupied(struct device *dev,
    - dma_addr_t device_addr, size_t size)
    -{
    - struct dma_coherent_mem *mem = dev->dma_mem;
    - int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
    - int pos, err;
    -
    - if (!mem)
    - return ERR_PTR(-EINVAL);
    -
    - pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
    - err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
    - if (err != 0)
    - return ERR_PTR(err);
    - return mem->virt_base + (pos << PAGE_SHIFT);
    -}
    -EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
    diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
    index 7580736..a1a4dc7 100644
    --- a/include/asm-x86/dma-mapping.h
    +++ b/include/asm-x86/dma-mapping.h
    @@ -215,6 +215,14 @@ static inline int dma_get_cache_alignment(void)

    #ifdef CONFIG_X86_32
    # define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
    +struct dma_coherent_mem {
    + void *virt_base;
    + u32 device_base;
    + int size;
    + int flags;
    + unsigned long *bitmap;
    +};
    +
    extern int
    dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
    dma_addr_t device_addr, size_t size, int flags);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  13. [PATCH 05/28] x86: use sg_phys in x86_64

    To make the code usable on i386, where we have high memory mappings,
    we drop the virt_to_bus(sg_virt()) construction in favour of sg_phys.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-nommu_64.c | 2 +-
    1 files changed, 1 insertions(+), 1 deletions(-)

    diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
    index 1da9cf9..c6901e7 100644
    --- a/arch/x86/kernel/pci-nommu_64.c
    +++ b/arch/x86/kernel/pci-nommu_64.c
    @@ -60,7 +60,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,

    for_each_sg(sg, s, nents, i) {
    BUG_ON(!sg_page(s));
    - s->dma_address = virt_to_bus(sg_virt(s));
    + s->dma_address = sg_phys(s);
    if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
    return 0;
    s->dma_length = s->length;
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  14. [PATCH 24/28] x86: unify gfp masks

    Use the same gfp masks for x86_64 and i386.
    It involves using HIGHMEM or DMA32 where necessary, for the sake
    of code compatibility (no real effect), and using the NORETRY
    mask for i386.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 6 ++++--
    arch/x86/kernel/pci-dma_64.c | 2 ++
    2 files changed, 6 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 11f100a..5450bd1 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -79,7 +79,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    unsigned long dma_mask = 0;

    /* ignore region specifiers */
    - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
    + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

    if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
    return ret;
    @@ -91,7 +91,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    if (dma_mask == 0)
    dma_mask = DMA_32BIT_MASK;

    - again:
    + /* Don't invoke OOM killer */
    + gfp |= __GFP_NORETRY;
    +again:
    page = dma_alloc_pages(dev, gfp, order);
    if (page == NULL)
    return NULL;
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index 13a31a4..b956f59 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -49,6 +49,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    unsigned long dma_mask = 0;
    u64 bus;

    + /* ignore region specifiers */
    + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

    if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
    return memory;
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  15. [PATCH 08/28] x86: move definition to pci-dma.c

    Move dma_ops structure definition to pci-dma.c, where it
    belongs.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-base_32.c | 11 ++++++++---
    arch/x86/kernel/pci-dma.c | 3 +++
    arch/x86/mm/init_64.c | 3 ---
    3 files changed, 11 insertions(+), 6 deletions(-)

    diff --git a/arch/x86/kernel/pci-base_32.c b/arch/x86/kernel/pci-base_32.c
    index 837bbe9..b44ea51 100644
    --- a/arch/x86/kernel/pci-base_32.c
    +++ b/arch/x86/kernel/pci-base_32.c
    @@ -37,7 +37,7 @@ static int pci32_map_error(dma_addr_t dma_addr)
    return 0;
    }

    -static const struct dma_mapping_ops pci32_dma_ops = {
    +const struct dma_mapping_ops pci32_dma_ops = {
    .map_single = pci32_map_single,
    .unmap_single = NULL,
    .map_sg = pci32_dma_map_sg,
    @@ -51,5 +51,10 @@ static const struct dma_mapping_ops pci32_dma_ops = {
    .mapping_error = pci32_map_error,
    };

    -const struct dma_mapping_ops *dma_ops = &pci32_dma_ops;
    -EXPORT_SYMBOL(dma_ops);
    +/* this is temporary */
    +int __init no_iommu_init(void)
    +{
    + dma_ops = &pci32_dma_ops;
    + return 0;
    +}
    +fs_initcall(no_iommu_init);
    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index f1c24d8..1323cd8 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -1,5 +1,8 @@
    #include

    +const struct dma_mapping_ops *dma_ops;
    +EXPORT_SYMBOL(dma_ops);
    +
    int dma_set_mask(struct device *dev, u64 mask)
    {
    if (!dev->dma_mask || !dma_supported(dev, mask))
    diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
    index 8c989b8..f06a51e 100644
    --- a/arch/x86/mm/init_64.c
    +++ b/arch/x86/mm/init_64.c
    @@ -47,9 +47,6 @@
    #include
    #include

    -const struct dma_mapping_ops *dma_ops;
    -EXPORT_SYMBOL(dma_ops);
    -
    static unsigned long dma_reserve __initdata;

    DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  16. [PATCH 17/28] x86: adjust dma_free_coherent for i386

    We call unmap_single, if available.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 2 ++
    1 files changed, 2 insertions(+), 0 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 78c7640..49166a4 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -84,6 +84,8 @@ void dma_free_coherent(struct device *dev, size_t size,
    WARN_ON(irqs_disabled()); /* for portability */
    if (dma_release_coherent(dev, order, vaddr))
    return;
    + if (dma_ops->unmap_single)
    + dma_ops->unmap_single(dev, dma_handle, size, 0);
    free_pages((unsigned long)vaddr, order);
    }
    EXPORT_SYMBOL(dma_free_coherent);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  17. [PATCH 16/28] x86: isolate coherent mapping functions

    i386 implements the declare-coherent-memory API, and x86_64 does not;
    this is reflected in pieces of dma_alloc_coherent and dma_free_coherent.
    Those pieces are isolated in separate functions, which are declared
    as empty macros in x86_64. This way we can make the code the same.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 51 ++++++++++++++++++++++++++++-------------
    arch/x86/kernel/pci-dma_64.c | 11 ++++++++-
    2 files changed, 45 insertions(+), 17 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 818d95e..78c7640 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -18,27 +18,50 @@
    dma_addr_t bad_dma_address __read_mostly = 0x0;
    EXPORT_SYMBOL(bad_dma_address);

    -void *dma_alloc_coherent(struct device *dev, size_t size,
    - dma_addr_t *dma_handle, gfp_t gfp)
    +static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
    + dma_addr_t *dma_handle, void **ret)
    {
    - void *ret;
    struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    int order = get_order(size);
    - /* ignore region specifiers */
    - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

    if (mem) {
    int page = bitmap_find_free_region(mem->bitmap, mem->size,
    order);
    if (page >= 0) {
    *dma_handle = mem->device_base + (page << PAGE_SHIFT);
    - ret = mem->virt_base + (page << PAGE_SHIFT);
    - memset(ret, 0, size);
    - return ret;
    + *ret = mem->virt_base + (page << PAGE_SHIFT);
    + memset(*ret, 0, size);
    }
    if (mem->flags & DMA_MEMORY_EXCLUSIVE)
    - return NULL;
    + *ret = NULL;
    + }
    + return (mem != NULL);
    +}
    +
    +static int dma_release_coherent(struct device *dev, int order, void *vaddr)
    +{
    + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    +
    + if (mem && vaddr >= mem->virt_base && vaddr <
    + (mem->virt_base + (mem->size << PAGE_SHIFT))) {
    + int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
    +
    + bitmap_release_region(mem->bitmap, page, order);
    + return 1;
    }
    + return 0;
    +}
    +
    +void *dma_alloc_coherent(struct device *dev, size_t size,
    + dma_addr_t *dma_handle, gfp_t gfp)
    +{
    + void *ret = NULL;
    + int order = get_order(size);
    + /* ignore region specifiers */
    + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
    +
    + if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
    + return ret;

    if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
    gfp |= GFP_DMA;
    @@ -56,15 +79,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
    void dma_free_coherent(struct device *dev, size_t size,
    void *vaddr, dma_addr_t dma_handle)
    {
    - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    int order = get_order(size);

    WARN_ON(irqs_disabled()); /* for portability */
    - if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
    - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
    -
    - bitmap_release_region(mem->bitmap, page, order);
    - } else
    - free_pages((unsigned long)vaddr, order);
    + if (dma_release_coherent(dev, order, vaddr))
    + return;
    + free_pages((unsigned long)vaddr, order);
    }
    EXPORT_SYMBOL(dma_free_coherent);
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index e7d45cf..6eacd58 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -39,6 +39,8 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
    return page ? page_address(page) : NULL;
    }

    +#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
    +#define dma_release_coherent(dev, order, vaddr) (0)
    /*
    * Allocate memory for a coherent mapping.
    */
    @@ -50,6 +52,10 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    unsigned long dma_mask = 0;
    u64 bus;

    +
    + if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
    + return memory;
    +
    if (!dev)
    dev = &fallback_dev;
    dma_mask = dev->coherent_dma_mask;
    @@ -141,9 +147,12 @@ EXPORT_SYMBOL(dma_alloc_coherent);
    void dma_free_coherent(struct device *dev, size_t size,
    void *vaddr, dma_addr_t bus)
    {
    + int order = get_order(size);
    WARN_ON(irqs_disabled()); /* for portability */
    + if (dma_release_coherent(dev, order, vaddr))
    + return;
    if (dma_ops->unmap_single)
    dma_ops->unmap_single(dev, bus, size, 0);
    - free_pages((unsigned long)vaddr, get_order(size));
    + free_pages((unsigned long)vaddr, order);
    }
    EXPORT_SYMBOL(dma_free_coherent);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  18. [PATCH 28/28] x86: integrate pci-dma.c

    The code in pci-dma_{32,64}.c is now sufficiently
    close in both files. We merge them into pci-dma.c.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/Makefile | 2 +-
    arch/x86/kernel/pci-dma.c | 175 ++++++++++++++++++++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_32.c | 173 -----------------------------------------
    arch/x86/kernel/pci-dma_64.c | 154 -------------------------------------
    4 files changed, 176 insertions(+), 328 deletions(-)
    delete mode 100644 arch/x86/kernel/pci-dma_32.c
    delete mode 100644 arch/x86/kernel/pci-dma_64.c

    diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
    index b2a1358..423e1c4 100644
    --- a/arch/x86/kernel/Makefile
    +++ b/arch/x86/kernel/Makefile
    @@ -23,7 +23,7 @@ obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o
    obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
    obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
    obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o
    -obj-y += pci-dma_$(BITS).o bootflag.o e820_$(BITS).o
    +obj-y += bootflag.o e820_$(BITS).o
    obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
    obj-y += alternative.o i8253.o pci-nommu.o
    obj-$(CONFIG_X86_64) += bugs_64.o
    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index d6734ed..5cc8d5a 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -38,6 +38,15 @@ EXPORT_SYMBOL(iommu_bio_merge);
    dma_addr_t bad_dma_address __read_mostly = 0;
    EXPORT_SYMBOL(bad_dma_address);

    +/* Dummy device used for NULL arguments (normally ISA). Better would
    + be probably a smaller DMA mask, but this is bug-to-bug compatible
    + to older i386. */
    +struct device fallback_dev = {
    + .bus_id = "fallback device",
    + .coherent_dma_mask = DMA_32BIT_MASK,
    + .dma_mask = &fallback_dev.coherent_dma_mask,
    +};
    +
    int dma_set_mask(struct device *dev, u64 mask)
    {
    if (!dev->dma_mask || !dma_supported(dev, mask))
    @@ -128,6 +137,43 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
    return mem->virt_base + (pos << PAGE_SHIFT);
    }
    EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
    +
    +static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
    + dma_addr_t *dma_handle, void **ret)
    +{
    + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    + int order = get_order(size);
    +
    + if (mem) {
    + int page = bitmap_find_free_region(mem->bitmap, mem->size,
    + order);
    + if (page >= 0) {
    + *dma_handle = mem->device_base + (page << PAGE_SHIFT);
    + *ret = mem->virt_base + (page << PAGE_SHIFT);
    + memset(*ret, 0, size);
    + }
    + if (mem->flags & DMA_MEMORY_EXCLUSIVE)
    + *ret = NULL;
    + }
    + return (mem != NULL);
    +}
    +
    +static int dma_release_coherent(struct device *dev, int order, void *vaddr)
    +{
    + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    +
    + if (mem && vaddr >= mem->virt_base && vaddr <
    + (mem->virt_base + (mem->size << PAGE_SHIFT))) {
    + int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
    +
    + bitmap_release_region(mem->bitmap, page, order);
    + return 1;
    + }
    + return 0;
    +}
    +#else
    +#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
    +#define dma_release_coherent(dev, order, vaddr) (0)
    #endif /* CONFIG_X86_32 */

    int dma_supported(struct device *dev, u64 mask)
    @@ -171,6 +217,135 @@ int dma_supported(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_supported);

    +/* Allocate DMA memory on node near device */
    +noinline struct page *
    +dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
    +{
    + int node;
    +
    + node = dev_to_node(dev);
    +
    + return alloc_pages_node(node, gfp, order);
    +}
    +
    +/*
    + * Allocate memory for a coherent mapping.
    + */
    +void *
    +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    + gfp_t gfp)
    +{
    + void *memory = NULL;
    + struct page *page;
    + unsigned long dma_mask = 0;
    + dma_addr_t bus;
    +
    + /* ignore region specifiers */
    + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
    +
    + if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
    + return memory;
    +
    + if (!dev)
    + dev = &fallback_dev;
    + dma_mask = dev->coherent_dma_mask;
    + if (dma_mask == 0)
    + dma_mask = DMA_32BIT_MASK;
    +
    + /* Device not DMA able */
    + if (dev->dma_mask == NULL)
    + return NULL;
    +
    + /* Don't invoke OOM killer */
    + gfp |= __GFP_NORETRY;
    +
    +#ifdef CONFIG_X86_64
    + /* Why <=? Even when the mask is smaller than 4GB it is often
    + larger than 16MB and in this case we have a chance of
    + finding fitting memory in the next higher zone first. If
    + not retry with true GFP_DMA. -AK */
    + if (dma_mask <= DMA_32BIT_MASK)
    + gfp |= GFP_DMA32;
    +#endif
    +
    + again:
    + page = dma_alloc_pages(dev, gfp, get_order(size));
    + if (page == NULL)
    + return NULL;
    +
    + {
    + int high, mmu;
    + bus = page_to_phys(page);
    + memory = page_address(page);
    + high = (bus + size) >= dma_mask;
    + mmu = high;
    + if (force_iommu && !(gfp & GFP_DMA))
    + mmu = 1;
    + else if (high) {
    + free_pages((unsigned long)memory,
    + get_order(size));
    +
    + /* Don't use the 16MB ZONE_DMA unless absolutely
    + needed. It's better to use remapping first. */
    + if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
    + gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
    + goto again;
    + }
    +
    + /* Let low level make its own zone decisions */
    + gfp &= ~(GFP_DMA32|GFP_DMA);
    +
    + if (dma_ops->alloc_coherent)
    + return dma_ops->alloc_coherent(dev, size,
    + dma_handle, gfp);
    + return NULL;
    + }
    +
    + memset(memory, 0, size);
    + if (!mmu) {
    + *dma_handle = bus;
    + return memory;
    + }
    + }
    +
    + if (dma_ops->alloc_coherent) {
    + free_pages((unsigned long)memory, get_order(size));
    + gfp &= ~(GFP_DMA|GFP_DMA32);
    + return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
    + }
    +
    + if (dma_ops->map_simple) {
    + *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
    + size,
    + PCI_DMA_BIDIRECTIONAL);
    + if (*dma_handle != bad_dma_address)
    + return memory;
    + }
    +
    + if (panic_on_overflow)
    + panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
    + (unsigned long)size);
    + free_pages((unsigned long)memory, get_order(size));
    + return NULL;
    +}
    +EXPORT_SYMBOL(dma_alloc_coherent);
    +
    +/*
    + * Unmap coherent memory.
    + * The caller must ensure that the device has finished accessing the mapping.
    + */
    +void dma_free_coherent(struct device *dev, size_t size,
    + void *vaddr, dma_addr_t bus)
    +{
    + int order = get_order(size);
    + WARN_ON(irqs_disabled()); /* for portability */
    + if (dma_release_coherent(dev, order, vaddr))
    + return;
    + if (dma_ops->unmap_single)
    + dma_ops->unmap_single(dev, bus, size, 0);
    + free_pages((unsigned long)vaddr, order);
    +}
    +EXPORT_SYMBOL(dma_free_coherent);

    static int __init pci_iommu_init(void)
    {
    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    deleted file mode 100644
    index d2f7074..0000000
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ /dev/null
    @@ -1,173 +0,0 @@
    -/*
    - * Dynamic DMA mapping support.
    - *
    - * On i386 there is no hardware dynamic DMA address translation,
    - * so consistent alloc/free are merely page allocation/freeing.
    - * The rest of the dynamic DMA mapping interface is implemented
    - * in asm/pci.h.
    - */
    -
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -
    -/* Dummy device used for NULL arguments (normally ISA). Better would
    - be probably a smaller DMA mask, but this is bug-to-bug compatible
    - to i386. */
    -struct device fallback_dev = {
    - .bus_id = "fallback device",
    - .coherent_dma_mask = DMA_32BIT_MASK,
    - .dma_mask = &fallback_dev.coherent_dma_mask,
    -};
    -
    -
    -static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
    - dma_addr_t *dma_handle, void **ret)
    -{
    - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    - int order = get_order(size);
    -
    - if (mem) {
    - int page = bitmap_find_free_region(mem->bitmap, mem->size,
    - order);
    - if (page >= 0) {
    - *dma_handle = mem->device_base + (page << PAGE_SHIFT);
    - *ret = mem->virt_base + (page << PAGE_SHIFT);
    - memset(*ret, 0, size);
    - }
    - if (mem->flags & DMA_MEMORY_EXCLUSIVE)
    - *ret = NULL;
    - }
    - return (mem != NULL);
    -}
    -
    -static int dma_release_coherent(struct device *dev, int order, void *vaddr)
    -{
    - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    -
    - if (mem && vaddr >= mem->virt_base && vaddr <
    - (mem->virt_base + (mem->size << PAGE_SHIFT))) {
    - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
    -
    - bitmap_release_region(mem->bitmap, page, order);
    - return 1;
    - }
    - return 0;
    -}
    -
    -/* Allocate DMA memory on node near device */
    -noinline struct page *
    -dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
    -{
    - int node;
    -
    - node = dev_to_node(dev);
    -
    - return alloc_pages_node(node, gfp, order);
    -}
    -
    -void *dma_alloc_coherent(struct device *dev, size_t size,
    - dma_addr_t *dma_handle, gfp_t gfp)
    -{
    - void *ret = NULL;
    - struct page *page;
    - dma_addr_t bus;
    - int order = get_order(size);
    - unsigned long dma_mask = 0;
    -
    - /* ignore region specifiers */
    - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
    -
    - if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
    - return ret;
    -
    - if (!dev)
    - dev = &fallback_dev;
    -
    - dma_mask = dev->coherent_dma_mask;
    - if (dma_mask == 0)
    - dma_mask = DMA_32BIT_MASK;
    -
    - if (dev->dma_mask == NULL)
    - return NULL;
    -
    - /* Don't invoke OOM killer */
    - gfp |= __GFP_NORETRY;
    -again:
    - page = dma_alloc_pages(dev, gfp, order);
    - if (page == NULL)
    - return NULL;
    -
    - {
    - int high, mmu;
    - bus = page_to_phys(page);
    - ret = page_address(page);
    - high = (bus + size) >= dma_mask;
    - mmu = high;
    - if (force_iommu && !(gfp & GFP_DMA))
    - mmu = 1;
    - else if (high) {
    - free_pages((unsigned long)ret,
    - get_order(size));
    -
    - /* Don't use the 16MB ZONE_DMA unless absolutely
    - needed. It's better to use remapping first. */
    - if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
    - gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
    - goto again;
    - }
    -
    - /* Let low level make its own zone decisions */
    - gfp &= ~(GFP_DMA32|GFP_DMA);
    -
    - if (dma_ops->alloc_coherent)
    - return dma_ops->alloc_coherent(dev, size,
    - dma_handle, gfp);
    - return NULL;
    -
    - }
    - memset(ret, 0, size);
    - if (!mmu) {
    - *dma_handle = bus;
    - return ret;
    - }
    - }
    -
    - if (dma_ops->alloc_coherent) {
    - free_pages((unsigned long)ret, get_order(size));
    - gfp &= ~(GFP_DMA|GFP_DMA32);
    - return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
    - }
    -
    - if (dma_ops->map_simple) {
    - *dma_handle = dma_ops->map_simple(dev, virt_to_phys(ret),
    - size,
    - PCI_DMA_BIDIRECTIONAL);
    - if (*dma_handle != bad_dma_address)
    - return ret;
    - }
    -
    - if (panic_on_overflow)
    - panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
    - (unsigned long)size);
    - free_pages((unsigned long)ret, get_order(size));
    - return NULL;
    -}
    -EXPORT_SYMBOL(dma_alloc_coherent);
    -
    -void dma_free_coherent(struct device *dev, size_t size,
    - void *vaddr, dma_addr_t dma_handle)
    -{
    - int order = get_order(size);
    -
    - WARN_ON(irqs_disabled()); /* for portability */
    - if (dma_release_coherent(dev, order, vaddr))
    - return;
    - if (dma_ops->unmap_single)
    - dma_ops->unmap_single(dev, dma_handle, size, 0);
    - free_pages((unsigned long)vaddr, order);
    -}
    -EXPORT_SYMBOL(dma_free_coherent);
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    deleted file mode 100644
    index 596c8c8..0000000
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ /dev/null
    @@ -1,154 +0,0 @@
    -/*
    - * Dynamic DMA mapping support.
    - */
    -
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -#include
    -
    -
    -/* Dummy device used for NULL arguments (normally ISA). Better would
    - be probably a smaller DMA mask, but this is bug-to-bug compatible
    - to i386. */
    -struct device fallback_dev = {
    - .bus_id = "fallback device",
    - .coherent_dma_mask = DMA_32BIT_MASK,
    - .dma_mask = &fallback_dev.coherent_dma_mask,
    -};
    -
    -/* Allocate DMA memory on node near device */
    -noinline static void *
    -dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
    -{
    - int node;
    -
    - node = dev_to_node(dev);
    -
    - return alloc_pages_node(node, gfp, order);
    -}
    -
    -#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
    -#define dma_release_coherent(dev, order, vaddr) (0)
    -/*
    - * Allocate memory for a coherent mapping.
    - */
    -void *
    -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    - gfp_t gfp)
    -{
    - void *memory;
    - struct page *page;
    - unsigned long dma_mask = 0;
    - u64 bus;
    -
    - /* ignore region specifiers */
    - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
    -
    - if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
    - return memory;
    -
    - if (!dev)
    - dev = &fallback_dev;
    - dma_mask = dev->coherent_dma_mask;
    - if (dma_mask == 0)
    - dma_mask = DMA_32BIT_MASK;
    -
    - /* Device not DMA able */
    - if (dev->dma_mask == NULL)
    - return NULL;
    -
    - /* Don't invoke OOM killer */
    - gfp |= __GFP_NORETRY;
    -
    - /* Why <=? Even when the mask is smaller than 4GB it is often
    - larger than 16MB and in this case we have a chance of
    - finding fitting memory in the next higher zone first. If
    - not retry with true GFP_DMA. -AK */
    - if (dma_mask <= DMA_32BIT_MASK)
    - gfp |= GFP_DMA32;
    -
    - again:
    - page = dma_alloc_pages(dev, gfp, get_order(size));
    - if (page == NULL)
    - return NULL;
    -
    - {
    - int high, mmu;
    - bus = page_to_phys(page);
    - memory = page_address(page);
    - high = (bus + size) >= dma_mask;
    - mmu = high;
    - if (force_iommu && !(gfp & GFP_DMA))
    - mmu = 1;
    - else if (high) {
    - free_pages((unsigned long)memory,
    - get_order(size));
    -
    - /* Don't use the 16MB ZONE_DMA unless absolutely
    - needed. It's better to use remapping first. */
    - if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
    - gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
    - goto again;
    - }
    -
    - /* Let low level make its own zone decisions */
    - gfp &= ~(GFP_DMA32|GFP_DMA);
    -
    - if (dma_ops->alloc_coherent)
    - return dma_ops->alloc_coherent(dev, size,
    - dma_handle, gfp);
    - return NULL;
    - }
    -
    - memset(memory, 0, size);
    - if (!mmu) {
    - *dma_handle = bus;
    - return memory;
    - }
    - }
    -
    - if (dma_ops->alloc_coherent) {
    - free_pages((unsigned long)memory, get_order(size));
    - gfp &= ~(GFP_DMA|GFP_DMA32);
    - return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
    - }
    -
    - if (dma_ops->map_simple) {
    - *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
    - size,
    - PCI_DMA_BIDIRECTIONAL);
    - if (*dma_handle != bad_dma_address)
    - return memory;
    - }
    -
    - if (panic_on_overflow)
    - panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",size);
    - free_pages((unsigned long)memory, get_order(size));
    - return NULL;
    -}
    -EXPORT_SYMBOL(dma_alloc_coherent);
    -
    -/*
    - * Unmap coherent memory.
    - * The caller must ensure that the device has finished accessing the mapping.
    - */
    -void dma_free_coherent(struct device *dev, size_t size,
    - void *vaddr, dma_addr_t bus)
    -{
    - int order = get_order(size);
    - WARN_ON(irqs_disabled()); /* for portability */
    - if (dma_release_coherent(dev, order, vaddr))
    - return;
    - if (dma_ops->unmap_single)
    - dma_ops->unmap_single(dev, bus, size, 0);
    - free_pages((unsigned long)vaddr, order);
    -}
    -EXPORT_SYMBOL(dma_free_coherent);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  19. [PATCH 09/28] x86: move initialization functions to pci-dma.c

    initcalls that trigger the various possibilities for
    the dma subsystem are moved to pci-dma.c.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma.c | 25 +++++++++++++++++++++++++
    arch/x86/kernel/pci-dma_64.c | 23 -----------------------
    2 files changed, 25 insertions(+), 23 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
    index 1323cd8..d30634b 100644
    --- a/arch/x86/kernel/pci-dma.c
    +++ b/arch/x86/kernel/pci-dma.c
    @@ -1,4 +1,8 @@
    #include
    +#include
    +
    +#include
    +#include

    const struct dma_mapping_ops *dma_ops;
    EXPORT_SYMBOL(dma_ops);
    @@ -14,4 +18,25 @@ int dma_set_mask(struct device *dev, u64 mask)
    }
    EXPORT_SYMBOL(dma_set_mask);

    +static int __init pci_iommu_init(void)
    +{
    +#ifdef CONFIG_CALGARY_IOMMU
    + calgary_iommu_init();
    +#endif
    +
    + intel_iommu_init();

    +#ifdef CONFIG_GART_IOMMU
    + gart_iommu_init();
    +#endif
    +
    + no_iommu_init();
    + return 0;
    +}
    +
    +void pci_iommu_shutdown(void)
    +{
    + gart_iommu_shutdown();
    +}
    +/* Must execute after PCI subsystem */
    +fs_initcall(pci_iommu_init);
    diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
    index e697b86..e95f671 100644
    --- a/arch/x86/kernel/pci-dma_64.c
    +++ b/arch/x86/kernel/pci-dma_64.c
    @@ -347,27 +347,6 @@ void __init pci_iommu_alloc(void)
    #endif
    }

    -static int __init pci_iommu_init(void)
    -{
    -#ifdef CONFIG_CALGARY_IOMMU
    - calgary_iommu_init();
    -#endif
    -
    - intel_iommu_init();
    -
    -#ifdef CONFIG_GART_IOMMU
    - gart_iommu_init();
    -#endif
    -
    - no_iommu_init();
    - return 0;
    -}
    -
    -void pci_iommu_shutdown(void)
    -{
    - gart_iommu_shutdown();
    -}
    -
    #ifdef CONFIG_PCI
    /* Many VIA bridges seem to corrupt data for DAC. Disable it here */

    @@ -380,5 +359,3 @@ static __devinit void via_no_dac(struct pci_dev *dev)
    }
    DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
    #endif
    -/* Must execute after PCI subsystem */
    -fs_initcall(pci_iommu_init);
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

  20. [PATCH 21/28] x86: retry allocation if failed

    This patch puts in the code to retry allocation in case it fails. On its
    own, it does not make much sense beyond making the code look like x86_64.
    But later patches in this series will make us try to allocate from
    zones other than DMA first, which may fail.

    Signed-off-by: Glauber Costa
    ---
    arch/x86/kernel/pci-dma_32.c | 34 +++++++++++++++++++++++++++++-----
    1 files changed, 29 insertions(+), 5 deletions(-)

    diff --git a/arch/x86/kernel/pci-dma_32.c b/arch/x86/kernel/pci-dma_32.c
    index 0d630ae..f6cf434 100644
    --- a/arch/x86/kernel/pci-dma_32.c
    +++ b/arch/x86/kernel/pci-dma_32.c
    @@ -66,6 +66,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    struct page *page;
    dma_addr_t bus;
    int order = get_order(size);
    + unsigned long dma_mask = 0;
    +
    /* ignore region specifiers */
    gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

    @@ -75,15 +77,37 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
    if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
    gfp |= GFP_DMA;

    + dma_mask = dev->coherent_dma_mask;
    + if (dma_mask == 0)
    + dma_mask = DMA_32BIT_MASK;
    +
    + again:
    page = dma_alloc_pages(dev, gfp, order);
    if (page == NULL)
    return NULL;

    - ret = page_address(page);
    - bus = page_to_phys(page);
    -
    - memset(ret, 0, size);
    - *dma_handle = bus;
    + {
    + int high, mmu;
    + bus = page_to_phys(page);
    + ret = page_address(page);
    + high = (bus + size) >= dma_mask;
    + mmu = high;
    + if (force_iommu && !(gfp & GFP_DMA))
    + mmu = 1;
    + else if (high) {
    + free_pages((unsigned long)ret,
    + get_order(size));
    +
    + /* Don't use the 16MB ZONE_DMA unless absolutely
    + needed. It's better to use remapping first. */
    + if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
    + gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
    + goto again;
    + }
    + }
    + memset(ret, 0, size);
    + *dma_handle = bus;
    + }

    return ret;
    }
    --
    1.5.0.6

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/

+ Reply to Thread
Page 1 of 2 1 2 LastLast