unsigned int flags, struct exynos_drm_gem_buf *buf)
{
int ret = 0;
- unsigned int npages, i = 0;
- struct scatterlist *sgl;
enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
DRM_DEBUG_KMS("%s\n", __FILE__);
goto err_free_sgt;
}
- npages = buf->sgt->nents;
-
- buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
- if (!buf->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_table;
- }
-
- sgl = buf->sgt->sgl;
- while (i < npages) {
- buf->pages[i] = sg_page(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->kvaddr,
(unsigned long)buf->dma_addr,
return ret;
-err_free_table:
- sg_free_table(buf->sgt);
err_free_sgt:
kfree(buf->sgt);
buf->sgt = NULL;
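With the pages array gone, any caller that used to index buf->pages[] can recover individual pages from the scatter/gather table instead. A minimal sketch of such a lookup, assuming a populated buf->sgt and the usual <linux/scatterlist.h> helpers (the function name is hypothetical, not part of the driver):

/* Hypothetical helper: return the nth page backing the buffer by
 * walking the sg table; replaces the old buf->pages[n] lookup. */
static struct page *exynos_gem_buf_page(struct exynos_drm_gem_buf *buf,
					unsigned int n)
{
	struct scatterlist *sgl;
	unsigned int i;

	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
		unsigned int npages = sgl->length >> PAGE_SHIFT;

		/* each sg entry covers a physically contiguous chunk,
		 * so nth_page() within the entry is valid */
		if (n < npages)
			return nth_page(sg_page(sgl), n);
		n -= npages;
	}

	return NULL;	/* n is beyond the end of the buffer */
}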
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and set up sgt. */
int exynos_drm_alloc_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buf,
unsigned int flags);
-/* release physical memory region, sgt and pages. */
+/* release physical memory region and sgt. */
void exynos_drm_free_buf(struct drm_device *dev,
unsigned int flags,
struct exynos_drm_gem_buf *buffer);
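For reference, callers pair these as below; a minimal sketch with error handling trimmed (the locals and surrounding flow are illustrative, not lifted from the driver):

	struct exynos_drm_gem_buf *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->size = roundup(size, PAGE_SIZE);

	ret = exynos_drm_alloc_buf(dev, buf, flags);	/* fills dma_addr and sgt */
	if (ret < 0) {
		kfree(buf);
		return ret;
	}

	/* ... use buf->dma_addr / buf->sgt ... */

	exynos_drm_free_buf(dev, flags, buf);
	kfree(buf);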
goto err_unlock;
}
- DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
- buf->size, buf->page_size);
+ DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
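Since the buffer no longer carries a pages array or page_size, a prime exporter can build the sg table it hands out straight from the DMA allocation. A minimal sketch using the standard dma_get_sgtable() helper (the surrounding variable names are assumptions, not the driver's):

	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* describe the DMA buffer as a (possibly single-entry) sg table
	 * for the importer */
	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
			      buf->dma_addr, buf->size);
	if (ret < 0) {
		kfree(sgt);
		return ERR_PTR(ret);
	}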
unsigned long pfn;
int i;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- if (!buf->sgt)
- return -EINTR;
-
- sgl = buf->sgt->sgl;
- for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
- if (!sgl) {
- DRM_ERROR("invalid SG table\n");
- return -EINTR;
- }
- if (page_offset < (sgl->length >> PAGE_SHIFT))
- break;
- page_offset -= (sgl->length >> PAGE_SHIFT);
- }
-
- if (i >= buf->sgt->nents) {
- DRM_ERROR("invalid page offset\n");
- return -EINVAL;
- }
+ if (!buf->sgt)
+ return -EINTR;
- pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
- } else {
- if (!buf->pages)
- return -EINTR;
+ if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+ DRM_ERROR("invalid page offset\n");
+ return -EINVAL;
+ }
- pfn = page_to_pfn(buf->pages[0]) + page_offset;
+ for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+ if (page_offset < (sgl->length >> PAGE_SHIFT))
+ break;
+ page_offset -= (sgl->length >> PAGE_SHIFT);
}
+ pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+
return vm_insert_mixed(vma, f_vaddr, pfn);
}
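The new lookup above amounts to: reject offsets past the end of the buffer, walk the sg list until the remaining offset falls inside one entry, then turn that entry's physical address into a pfn. The same logic, packaged as a standalone helper for clarity (the function name is illustrative):

static int exynos_gem_offset_to_pfn(struct exynos_drm_gem_buf *buf,
				    unsigned long page_offset,
				    unsigned long *pfn)
{
	struct scatterlist *sgl;
	int i;

	if (!buf->sgt)
		return -EINTR;

	if (page_offset >= (buf->size >> PAGE_SHIFT))
		return -EINVAL;

	/* skip whole sg entries until the offset lands inside one */
	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
		if (page_offset < (sgl->length >> PAGE_SHIFT))
			break;
		page_offset -= sgl->length >> PAGE_SHIFT;
	}

	*pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
	return 0;
}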
* device address with IOMMU.
* @write: whether pages will be written to by the caller.
* @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
* @size: size of allocated memory region.
* @pfnmap: indicate whether memory region from userptr is mmaped with
* VM_PFNMAP or not.
struct dma_attrs dma_attrs;
unsigned int write;
struct sg_table *sgt;
- struct page **pages;
- unsigned long page_size;
unsigned long size;
bool pfnmap;
};
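With pages and page_size dropped, per-buffer page counts are now derived from size alone, e.g. (hypothetical convenience macro, not part of the driver):

/* Hypothetical: number of PAGE_SIZE pages backing a buffer, derived
 * from size now that page_size is gone. */
#define EXYNOS_GEM_BUF_NPAGES(buf)	((buf)->size >> PAGE_SHIFT)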