* to ensure it is consistent.
*/
struct device;
-extern void *__dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
/*
 * Cache coherent cores.
*/
-#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
-#define __dma_free_coherent(size, addr) ((void)0)
#define __dma_sync(addr, size, rw) ((void)0)
#define __dma_sync_page(pg, off, sz, rw) ((void)0)
#endif
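
For orientation: the two halves above appear to come from arch/powerpc/include/asm/dma-mapping.h. Non-coherent cores get real cache-maintenance helpers, while coherent cores compile them down to no-ops. Roughly the shape of the streaming map hook that consumes them, a sketch assuming the dma_nommu_map_page of arch/powerpc/kernel/dma.c from this era (not part of this patch, kernel context assumed):

    static dma_addr_t dma_nommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir,
                                         unsigned long attrs)
    {
            /* No-op on coherent cores; cache flush/invalidate otherwise. */
            if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    __dma_sync_page(page, offset, size, dir);

            return page_to_phys(page) + offset + get_dma_offset(dev);
    }
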
}
+#ifndef CONFIG_NOT_COHERENT_CACHE
void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
void *ret;
-#ifdef CONFIG_NOT_COHERENT_CACHE
- ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
- if (ret == NULL)
- return NULL;
- *dma_handle += get_dma_offset(dev);
- return ret;
-#else
struct page *page;
int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
*dma_handle = __pa(ret) + get_dma_offset(dev);
return ret;
-#endif
}
void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
-#ifdef CONFIG_NOT_COHERENT_CACHE
- __dma_free_coherent(size, vaddr);
-#else
free_pages((unsigned long)vaddr, get_order(size));
-#endif
}
+#endif /* !CONFIG_NOT_COHERENT_CACHE */
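
The elided middle of __dma_nommu_alloc_coherent above, between the CONFIG_FSL_SOC zone selection and the tail shown, is in kernels of this vintage essentially a node-local page allocation. A sketch, assuming the ~4.19 arch/powerpc/kernel/dma.c body, shown only so the surviving context lines read sensibly:

    page = alloc_pages_node(node, flag, get_order(size));
    if (page == NULL)
            return NULL;
    ret = page_address(page);
    /* Zero the buffer so stale data never reaches the device. */
    memset(ret, 0, size);
    *dma_handle = __pa(ret) + get_dma_offset(dev);
    return ret;
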
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
 dma_addr_t *dma_handle, gfp_t flag,
 unsigned long attrs)
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *
-__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
struct ppc_vm_region *c;
/*
* Set the "dma handle"
*/
- *handle = page_to_phys(page);
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
do {
SetPageReserved(page);
no_page:
return NULL;
}
-EXPORT_SYMBOL(__dma_alloc_coherent);
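
The substantive change above is *dma_handle = phys_to_dma(dev, page_to_phys(page)): the bus offset is now applied at the allocation site, which is what lets the removed dma.c wrapper drop its manual *dma_handle += get_dma_offset(dev). On powerpc in this series, the arch hook behind phys_to_dma reduces to that same offset add; a sketch, assuming the asm/dma-direct.h of the time:

    static inline dma_addr_t __phys_to_dma(struct device *dev,
                                           phys_addr_t paddr)
    {
            /* Bus address = CPU physical address + per-device offset. */
            return paddr + get_dma_offset(dev);
    }
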
/*
* free a page as defined by the above mapping.
*/
-void __dma_free_coherent(size_t size, void *vaddr)
+void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct ppc_vm_region *c;
unsigned long flags, addr;
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(__dma_free_coherent);
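
Both EXPORT_SYMBOLs can go because drivers never call the __dma_nommu_* helpers directly; they reach them through the generic DMA API. Typical driver-side usage, assuming a valid struct device *dev, for orientation only:

    void *buf;
    dma_addr_t bus;

    buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    /* CPU accesses go through 'buf'; the device DMAs via 'bus'. */
    dma_free_coherent(dev, PAGE_SIZE, buf, bus);
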
/*
 * make an area consistent.
 */
/*
* Return the PFN for a given cpu virtual address returned by
- * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent()
*/
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
if (!of_machine_is_compatible("pika,warp"))
return 0;
- /* For __dma_alloc_coherent */
+ /* For __dma_nommu_alloc_coherent */
ISA_DMA_THRESHOLD = ~0L;
return 1;
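
Closing the loop on the __dma_get_coherent_pfn comment above: the PFN lookup exists so that dma_mmap_coherent() can hand a remapped, non-coherent allocation to userspace with uncached page protections. Roughly the shape of the consumer, a sketch assuming the ~4.19 dma_nommu_mmap_coherent in arch/powerpc/kernel/dma.c. (The final hunk, apparently from the warp platform setup in arch/powerpc/platforms/44x/warp.c, only updates a comment and the ISA_DMA_THRESHOLD setting to match the rename.)

    static int dma_nommu_mmap_coherent(struct device *dev,
                                       struct vm_area_struct *vma,
                                       void *cpu_addr, dma_addr_t handle,
                                       size_t size, unsigned long attrs)
    {
            unsigned long pfn;

    #ifdef CONFIG_NOT_COHERENT_CACHE
            /* Remapped allocation: map it uncached for userspace too. */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
    #else
            pfn = page_to_pfn(virt_to_page(cpu_addr));
    #endif
            return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }
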