From bb112b14af8d1910909eb132c78ddf49be907e31 Mon Sep 17 00:00:00 2001
From: Haneen Mohammed
Date: Tue, 24 Jul 2018 19:26:59 +0300
Subject: [PATCH] drm/vkms: Add functions to map/unmap GEM backing storage

This patch adds the necessary functions to map/unmap GEM backing memory
to the kernel's virtual address space.

Signed-off-by: Haneen Mohammed
Signed-off-by: Sean Paul
Link: https://patchwork.freedesktop.org/patch/msgid/4b6563ae4f4337a5fd51f872424addf64e8d59a6.1532446182.git.hamohammed.sa@gmail.com
---
 drivers/gpu/drm/vkms/vkms_drv.h |  9 ++++
 drivers/gpu/drm/vkms/vkms_gem.c | 79 ++++++++++++++++++++++++++++++++-
 2 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 07be29f2dc44..47048f707566 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -39,6 +39,8 @@ struct vkms_gem_object {
 	struct drm_gem_object gem;
 	struct mutex pages_lock; /* Page lock used in page fault handler */
 	struct page **pages;
+	unsigned int vmap_count;
+	void *vaddr;
 };
 
 #define drm_crtc_to_vkms_output(target) \
@@ -47,6 +49,9 @@ struct vkms_gem_object {
 #define drm_device_to_vkms_device(target) \
 	container_of(target, struct vkms_device, drm)
 
+#define drm_gem_to_vkms_gem(target)\
+	container_of(target, struct vkms_gem_object, gem)
+
 /* CRTC */
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 		   struct drm_plane *primary, struct drm_plane *cursor);
@@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 
 void vkms_gem_free_object(struct drm_gem_object *obj);
 
+int vkms_gem_vmap(struct drm_gem_object *obj);
+
+void vkms_gem_vunmap(struct drm_gem_object *obj);
+
 #endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e38368602b..a52e88338c05 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
 	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
 						   gem);
 
-	kvfree(gem->pages);
+	WARN_ON(gem->pages);
+	WARN_ON(gem->vaddr);
+
 	mutex_destroy(&gem->pages_lock);
 	drm_gem_object_release(obj);
 	kfree(gem);
@@ -177,3 +179,78 @@ unref:
 
 	return ret;
 }
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+	if (!vkms_obj->pages) {
+		struct page **pages = drm_gem_get_pages(gem_obj);
+
+		if (IS_ERR(pages))
+			return pages;
+
+		if (cmpxchg(&vkms_obj->pages, NULL, pages))
+			drm_gem_put_pages(gem_obj, pages, false, true);
+	}
+
+	return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+	mutex_lock(&vkms_obj->pages_lock);
+	if (vkms_obj->vmap_count < 1) {
+		WARN_ON(vkms_obj->vaddr);
+		WARN_ON(vkms_obj->pages);
+		mutex_unlock(&vkms_obj->pages_lock);
+		return;
+	}
+
+	vkms_obj->vmap_count--;
+
+	if (vkms_obj->vmap_count == 0) {
+		vunmap(vkms_obj->vaddr);
+		vkms_obj->vaddr = NULL;
+		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+		vkms_obj->pages = NULL;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+	int ret = 0;
+
+	mutex_lock(&vkms_obj->pages_lock);
+
+	if (!vkms_obj->vaddr) {
+		unsigned int n_pages = obj->size >> PAGE_SHIFT;
+		struct page **pages = _get_pages(vkms_obj);
+
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto out;
+		}
+
+		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+		if (!vkms_obj->vaddr)
+			goto err_vmap;
+
+		vkms_obj->vmap_count++;
+	}
+
+	goto out;
+
+err_vmap:
+	ret = -ENOMEM;
+	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+	vkms_obj->pages = NULL;
+out:
+	mutex_unlock(&vkms_obj->pages_lock);
+	return ret;
+}
-- 
2.30.2
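
For context, a minimal sketch of how a consumer inside vkms (for instance later
CRC generation) might call the new helpers. It is not part of the patch: only
vkms_gem_vmap(), vkms_gem_vunmap(), drm_gem_to_vkms_gem() and the vaddr field
come from the change above; the function name, the crc32_le() usage and the
error handling below are illustrative assumptions.

/* Illustrative sketch only, not part of this patch.
 * Assumes <linux/crc32.h> and "vkms_drv.h" are included.
 */
static u32 example_crc_of_gem(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	u32 crc = 0;
	int ret;

	/* Pins the backing pages and maps them into a contiguous
	 * kernel virtual range, taking a vmap_count reference.
	 */
	ret = vkms_gem_vmap(obj);
	if (ret) {
		DRM_ERROR("vmap failed: %d\n", ret);
		return 0;
	}

	/* vaddr now points at the whole object's contents. */
	crc = crc32_le(crc, vkms_obj->vaddr, obj->size);

	/* Drops the reference; mapping and pages are released when it hits zero. */
	vkms_gem_vunmap(obj);

	return crc;
}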