phys_to_page() and __phys_to_pfn() are not available on all platforms.
Use a combination of pfn_to_page(), PFN_DOWN(), page_to_pfn(), and
virt_to_page() to achieve the same results.
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kfree(table);
return ERR_PTR(ret);
}
- sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
- 0);
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(buffer->priv_phys)),
+ buffer->size, 0);
return table;
}
chunk_heap->chunk_size);
if (!paddr)
goto err;
- sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
+ sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+ chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
goto error;
}
for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
- struct page *page = phys_to_page(chunk_heap->base + i);
+ struct page *page = pfn_to_page(PFN_DOWN(chunk_heap->base + i));
struct page **pages = &page;
ret = map_vm_area(vm_struct, pgprot, &pages);
struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
- unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
+ unsigned long pfn = page_to_pfn(virt_to_page(buffer->priv_virt));
return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);