+++ /dev/null
---- a/arch/arm/Kconfig
-+++ b/arch/arm/Kconfig
-@@ -435,7 +435,6 @@ config ARCH_IXP4XX
- select CPU_XSCALE
- select GENERIC_GPIO
- select GENERIC_CLOCKEVENTS
-- select DMABOUNCE if PCI
- help
- Support for Intel's IXP4XX (XScale) family of processors.
-
---- a/arch/arm/mach-ixp4xx/Kconfig
-+++ b/arch/arm/mach-ixp4xx/Kconfig
-@@ -199,6 +199,43 @@ config IXP4XX_INDIRECT_PCI
- need to use the indirect method instead. If you don't know
- what you need, leave this option unselected.
-
-+config IXP4XX_LEGACY_DMABOUNCE
-+ bool "Legacy PCI DMA bounce support"
-+ depends on PCI
-+ default n
-+ select DMABOUNCE
-+ help
-+ The IXP4xx is limited to a 64MB window for PCI DMA, which
-+	  requires that PCI accesses above 64MB are bounced via buffers
-+ below 64MB.
-+
-+ The kernel has traditionally handled this issue by using ARM
-+ specific DMA bounce support code for all accesses >= 64MB.
-+ That code causes problems of its own, so it is desirable to
-+ disable it.
-+
-+ Enabling this option makes IXP4xx continue to use the problematic
-+ ARM DMA bounce code. Disabling this option makes IXP4xx use the
-+ kernel's generic bounce code.
-+
-+ Say 'N'.
-+
-+config IXP4XX_ZONE_DMA
-+ bool "Support > 64MB RAM"
-+ depends on !IXP4XX_LEGACY_DMABOUNCE
-+ default y
-+ select ZONE_DMA
-+ help
-+ The IXP4xx is limited to a 64MB window for PCI DMA, which
-+ requires that PCI accesses above 64MB are bounced via buffers
-+ below 64MB.
-+
-+ Disabling this option allows you to omit the support code for
-+ DMA-able memory allocations and DMA bouncing, but the kernel
-+ will then not work properly if more than 64MB of RAM is present.
-+
-+ Say 'Y' unless your platform is limited to <= 64MB of RAM.
-+
- config IXP4XX_QMGR
- tristate "IXP4xx Queue Manager support"
- help
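Both help texts above describe the same arithmetic: a PCI DMA buffer can be used directly only if every byte of it lies below the 64MB boundary. A minimal standalone sketch of that criterion (plain C with a hypothetical helper name, not part of this patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_64M	(64UL * 1024 * 1024)

/*
 * The buffer's last byte sits at bus_addr + size - 1, so it needs to
 * be bounced exactly when bus_addr + size exceeds the 64MB window.
 */
static bool pci_buffer_needs_bounce(uintptr_t bus_addr, size_t size)
{
	return bus_addr + size > SZ_64M;
}

int main(void)
{
	/* A 4KB buffer ending exactly at the boundary still fits ... */
	printf("%d\n", pci_buffer_needs_bounce(0x03fff000, 4096));	/* 0 */
	/* ... but one byte later it has to be bounced. */
	printf("%d\n", pci_buffer_needs_bounce(0x03fff001, 4096));	/* 1 */
	return 0;
}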
---- a/arch/arm/mach-ixp4xx/common-pci.c
-+++ b/arch/arm/mach-ixp4xx/common-pci.c
-@@ -321,27 +321,33 @@ static int abort_handler(unsigned long a
- */
- static int ixp4xx_pci_platform_notify(struct device *dev)
- {
-- if(dev->bus == &pci_bus_type) {
-- *dev->dma_mask = SZ_64M - 1;
-+ if (dev->bus == &pci_bus_type) {
-+ *dev->dma_mask = SZ_64M - 1;
- dev->coherent_dma_mask = SZ_64M - 1;
-+#ifdef CONFIG_DMABOUNCE
- dmabounce_register_dev(dev, 2048, 4096);
-+#endif
- }
- return 0;
- }
-
- static int ixp4xx_pci_platform_notify_remove(struct device *dev)
- {
-- if(dev->bus == &pci_bus_type) {
-+#ifdef CONFIG_DMABOUNCE
-+ if (dev->bus == &pci_bus_type)
- dmabounce_unregister_dev(dev);
-- }
-+#endif
- return 0;
- }
-
--int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-+#ifdef CONFIG_DMABOUNCE
-+int dma_needs_bounce_2(struct device *dev, dma_addr_t dma_addr, size_t size)
- {
-- return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
-+ return (dev->bus == &pci_bus_type ) && ((dma_addr + size) > SZ_64M);
- }
-+#endif
-
-+#ifdef CONFIG_ZONE_DMA
- /*
- * Only first 64MB of memory can be accessed via PCI.
- * We use GFP_DMA to allocate safe buffers to do map/unmap.
-@@ -364,6 +370,7 @@ void __init ixp4xx_adjust_zones(unsigned
- zhole_size[1] = zhole_size[0];
- zhole_size[0] = 0;
- }
-+#endif
-
- void __init ixp4xx_pci_preinit(void)
- {
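The ixp4xx_adjust_zones() hunk above only shows the tail of the function. As a rough standalone illustration of the zone split it performs (assumed 4KB pages and a simplified two-zone layout, not the kernel's exact code):

#define SZ_64M		(64UL * 1024 * 1024)
#define PAGE_SHIFT	12	/* assumed 4KB pages */

/*
 * Pages above the first 64MB are moved from zone 0 (DMA) to zone 1
 * (NORMAL) so that GFP_DMA allocations stay PCI-reachable; the hole
 * bookkeeping follows the pages it describes.
 */
void adjust_zones_sketch(unsigned long *zone_size, unsigned long *zhole_size)
{
	unsigned long dma_pages = SZ_64M >> PAGE_SHIFT;

	if (zone_size[0] <= dma_pages)
		return;		/* 64MB of RAM or less: nothing to split */

	zone_size[1] = zone_size[0] - dma_pages;
	zone_size[0] = dma_pages;
	zhole_size[1] = zhole_size[0];
	zhole_size[0] = 0;
}

With IXP4XX_ZONE_DMA disabled this hook is compiled out and all memory stays in a single zone, which is only safe when the machine has at most 64MB of RAM.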
---- a/arch/arm/mach-ixp4xx/include/mach/memory.h
-+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
-@@ -16,10 +16,12 @@
-
- #if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
-
-+#ifdef CONFIG_ZONE_DMA
- void ixp4xx_adjust_zones(unsigned long *size, unsigned long *holes);
-
- #define arch_adjust_zones(size, holes) \
- ixp4xx_adjust_zones(size, holes)
-+#endif
-
- #define ISA_DMA_THRESHOLD (SZ_64M - 1)
- #define MAX_DMA_ADDRESS (PAGE_OFFSET + SZ_64M)
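ISA_DMA_THRESHOLD and the SZ_64M - 1 dma_mask set in ixp4xx_pci_platform_notify() express the same window as a mask. A small sketch of the mask-style range check that generic code applies (hypothetical helper name):

#include <stdbool.h>
#include <stdint.h>

/*
 * A range is directly DMA-able only if no address bit above the mask
 * is set anywhere in [bus_addr, bus_addr + size - 1].  With a mask of
 * SZ_64M - 1 this is exactly the "below 64MB" rule.
 */
bool dma_range_fits_mask(uint64_t bus_addr, uint64_t size, uint64_t dma_mask)
{
	return ((bus_addr | (bus_addr + size - 1)) & ~dma_mask) == 0;
}

This is the same expression that map_single() in dmabounce.c evaluates into needs_bounce before consulting the platform check.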
---- a/arch/arm/common/dmabounce.c
-+++ b/arch/arm/common/dmabounce.c
-@@ -30,6 +30,7 @@
- #include <linux/dma-mapping.h>
- #include <linux/dmapool.h>
- #include <linux/list.h>
-+#include <linux/pci.h>
- #include <linux/scatterlist.h>
-
- #include <asm/cacheflush.h>
-@@ -248,7 +249,7 @@ static inline dma_addr_t map_single(stru
- 		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
- 	}
-
--	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-+	if (device_info && (needs_bounce || dma_needs_bounce_2(dev, dma_addr, size))) {
- 		struct safe_buffer *buf;
-
- 		buf = alloc_safe_buffer(device_info, ptr, size, dir);
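Finally, what "bouncing" means in map_single(): when the combined check fires, the data is copied into a safe buffer below 64MB and the device is given that buffer's bus address. A compressed standalone sketch (hypothetical names; a single static buffer stands in for the 2048/4096-byte pools registered via dmabounce_register_dev(), and the address translation is a placeholder):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SZ_64M	(64UL * 1024 * 1024)

static char safe_pool[4096];		/* assumed to live below 64MB */
static uintptr_t safe_pool_bus;		/* its bus address, set up at init */

static uintptr_t virt_to_bus_sketch(void *ptr)
{
	return (uintptr_t)ptr;		/* placeholder translation */
}

/*
 * If the buffer is not fully reachable through the 64MB PCI window,
 * copy it into the safe pool and hand the device the pool's bus
 * address instead of the original one.
 */
uintptr_t map_single_sketch(void *ptr, size_t size, bool to_device)
{
	uintptr_t bus = virt_to_bus_sketch(ptr);

	if (size <= sizeof(safe_pool) && bus + size > SZ_64M) {
		if (to_device)
			memcpy(safe_pool, ptr, size);	/* copy out now */
		return safe_pool_bus;	/* device sees the safe copy */
	}
	return bus;			/* directly reachable, no bounce */
}

For device-to-memory transfers the copy back out of the safe buffer happens at unmap time, which is handled by unmap_single() in the same file.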