From 8bdcd949bbe7e7f9e60a3564baa600884f8f4ba7 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Tue, 13 Jun 2017 11:07:08 -0400
Subject: [PATCH] drm/msm: pass address-space to _get_iova() and friends

No functional change, that will come later.  But this will make it
easier to deal with dynamically created address spaces (ie. per-
process pagetables for gpu).

Signed-off-by: Rob Clark
---
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c     |  8 ++++----
 drivers/gpu/drm/msm/adreno/a5xx_power.c   |  5 +++--
 drivers/gpu/drm/msm/adreno/adreno_gpu.c   |  6 +++---
 drivers/gpu/drm/msm/dsi/dsi_host.c        |  4 ++--
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c  |  6 +++---
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c   |  4 ++--
 drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 12 ++++++------
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c  |  4 ++--
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 12 ++++++------
 drivers/gpu/drm/msm/msm_drv.c             |  7 +++++--
 drivers/gpu/drm/msm/msm_drv.h             | 22 ++++++++++++++--------
 drivers/gpu/drm/msm/msm_fb.c              | 15 +++++++++------
 drivers/gpu/drm/msm/msm_fbdev.c           |  2 +-
 drivers/gpu/drm/msm/msm_gem.c             | 18 ++++++++++++------
 drivers/gpu/drm/msm/msm_gem.h             |  1 +
 drivers/gpu/drm/msm/msm_gem_submit.c      |  4 ++--
 drivers/gpu/drm/msm/msm_gpu.c             |  6 +++---
 17 files changed, 78 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 8d17f525c417..f6a9eec71fec 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -308,7 +308,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	}
 
 	if (iova) {
-		int ret = msm_gem_get_iova_locked(bo, gpu->id, iova);
+		int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
 
 		if (ret) {
 			drm_gem_object_unreference(bo);
@@ -696,19 +696,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
 	if (a5xx_gpu->pm4_bo) {
 		if (a5xx_gpu->pm4_iova)
-			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id);
+			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
 		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
 	}
 
 	if (a5xx_gpu->pfp_bo) {
 		if (a5xx_gpu->pfp_iova)
-			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id);
+			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
 		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
 	}
 
 	if (a5xx_gpu->gpmu_bo) {
 		if (a5xx_gpu->gpmu_iova)
-			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
 		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
 	}
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index f3274b827a49..feb7f4fd42fb 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	if (IS_ERR(a5xx_gpu->gpmu_bo))
 		goto err;
 
-	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
+			&a5xx_gpu->gpmu_iova))
 		goto err;
 
 	ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
@@ -327,7 +328,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 
 err:
 	if (a5xx_gpu->gpmu_iova)
-		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
+		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
 	if (a5xx_gpu->gpmu_bo)
 		drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 30a2096ac9a2..6fa694e6ae8c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -414,7 +414,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return -ENOMEM;
 	}
 
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
 			&adreno_gpu->memptrs_iova);
 	if (ret) {
 		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -433,7 +433,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 		msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
 
 		if (adreno_gpu->memptrs_iova)
-			msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+			msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
 
 		drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
 	}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 3c752cd0cc1c..2e7077194b21 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -994,7 +994,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 	}
 
 	ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
-			priv->kms->id, &iova);
+			priv->kms->aspace, &iova);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		pr_err("%s: failed to get iova, %d\n", __func__, ret);
@@ -1152,7 +1152,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
 
 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
 		ret = msm_gem_get_iova(msm_host->tx_gem_obj,
-				priv->kms->id, &dma_base);
+				priv->kms->aspace, &dma_base);
 		if (ret) {
 			pr_err("%s: failed to get iova: %d\n", __func__, ret);
 			return ret;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index d9ee73c3672d..59153a4ebd18 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
 	struct msm_kms *kms = &mdp4_kms->base.base;
 
-	msm_gem_put_iova(val, kms->id);
+	msm_gem_put_iova(val, kms->aspace);
 	drm_gem_object_unreference_unlocked(val);
 }
 
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
 			drm_gem_object_reference(next_bo);
-			msm_gem_get_iova_locked(next_bo, kms->id, &iova);
+			msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
 
 			/* enable cursor: */
 			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -432,7 +432,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 	}
 
 	if (cursor_bo) {
-		ret = msm_gem_get_iova(cursor_bo, kms->id, &iova);
+		ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
 		if (ret)
 			goto fail;
 	} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 7cf4dd40de28..0c01f9fe0ef0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -163,7 +163,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 	struct msm_gem_address_space *aspace = kms->aspace;
 
 	if (mdp4_kms->blank_cursor_iova)
-		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->id);
+		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
 	drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
 
 	if (aspace) {
@@ -545,7 +545,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->id,
+	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
 			&mdp4_kms->blank_cursor_iova);
 	if (ret) {
 		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 17fb1d6f2f23..a20e3d644523 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -110,7 +110,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, kms->id);
+	return msm_framebuffer_prepare(fb, kms->aspace);
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -125,7 +125,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
 		return;
 
 	DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
-	msm_framebuffer_cleanup(fb, kms->id);
+	msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
 
@@ -175,13 +175,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
 			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
-			msm_framebuffer_iova(fb, kms->id, 0));
+			msm_framebuffer_iova(fb, kms->aspace, 0));
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
-			msm_framebuffer_iova(fb, kms->id, 1));
+			msm_framebuffer_iova(fb, kms->aspace, 1));
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
-			msm_framebuffer_iova(fb, kms->id, 2));
+			msm_framebuffer_iova(fb, kms->aspace, 2));
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
-			msm_framebuffer_iova(fb, kms->id, 3));
+			msm_framebuffer_iova(fb, kms->aspace, 3));
 
 	plane->fb = fb;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index d79c5faba35e..cb5415d6c04b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -162,7 +162,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 	struct msm_kms *kms = &mdp5_kms->base.base;
 
-	msm_gem_put_iova(val, kms->id);
+	msm_gem_put_iova(val, kms->aspace);
 	drm_gem_object_unreference_unlocked(val);
 }
 
@@ -760,7 +760,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!cursor_bo)
 		return -ENOENT;
 
-	ret = msm_gem_get_iova(cursor_bo, kms->id, &cursor_addr);
+	ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
 	if (ret)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index b6a66befd1b7..fe3a4de1a433 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -279,7 +279,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
 		return 0;
 
 	DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
-	return msm_framebuffer_prepare(fb, kms->id);
+	return msm_framebuffer_prepare(fb, kms->aspace);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -293,7 +293,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
 		return;
 
 	DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
-	msm_framebuffer_cleanup(fb, kms->id);
+	msm_framebuffer_cleanup(fb, kms->aspace);
 }
 
 #define FRAC_16_16(mult, div) (((mult) << 16) / (div))
@@ -511,13 +511,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
 			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
-			msm_framebuffer_iova(fb, kms->id, 0));
+			msm_framebuffer_iova(fb, kms->aspace, 0));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
-			msm_framebuffer_iova(fb, kms->id, 1));
+			msm_framebuffer_iova(fb, kms->aspace, 1));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
-			msm_framebuffer_iova(fb, kms->id, 2));
+			msm_framebuffer_iova(fb, kms->aspace, 2));
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-			msm_framebuffer_iova(fb, kms->id, 3));
+			msm_framebuffer_iova(fb, kms->aspace, 3));
 }
 
 /* Note: mdp5_plane->pipe_lock must be locked */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index beb4f6b3ac70..a9c3c6b813d3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -51,6 +51,7 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 	.atomic_state_free = msm_atomic_state_free,
 };
 
+#include "msm_gem.h" /* temporary */
 int msm_register_address_space(struct drm_device *dev,
 		struct msm_gem_address_space *aspace)
 {
@@ -61,7 +62,9 @@ int msm_register_address_space(struct drm_device *dev,
 
 	priv->aspace[priv->num_aspaces] = aspace;
 
-	return priv->num_aspaces++;
+	aspace->id = priv->num_aspaces++;
+
+	return aspace->id;
 }
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -707,7 +710,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
 	if (!priv->gpu)
 		return -EINVAL;
 
-	return msm_gem_get_iova(obj, priv->gpu->id, iova);
+	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
 }
 
 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1b26ca626528..5570c5c91340 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -209,13 +209,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint64_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -251,9 +254,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
 
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index ba2733a95a4f..6ecb7b170316 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -84,14 +84,15 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
  * should be fine, since only the scanout (mdpN) side of things needs
  * this, the gpu doesn't care about fb's.
  */
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	int ret, i, n = fb->format->num_planes;
 	uint64_t iova;
 
 	for (i = 0; i < n; i++) {
-		ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
+		ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
 		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
 		if (ret)
 			return ret;
@@ -100,21 +101,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
 	return 0;
 }
 
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	int i, n = fb->format->num_planes;
 
 	for (i = 0; i < n; i++)
-		msm_gem_put_iova(msm_fb->planes[i], id);
+		msm_gem_put_iova(msm_fb->planes[i], aspace);
 }
 
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+		struct msm_gem_address_space *aspace, int plane)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	if (!msm_fb->planes[plane])
 		return 0;
-	return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+	return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3c08d6d35944..803ed272dc6d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -126,7 +126,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	 * in panic (ie. lock-safe, etc) we could avoid pinning the
 	 * buffer now:
 	 */
-	ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->id, &paddr);
+	ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
 	if (ret) {
 		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
 		goto fail_unlock;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 38fbaadccfb7..0a38c5b1a799 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -308,10 +308,11 @@ put_iova(struct drm_gem_object *obj)
  * That means when I do eventually need to add support for unpinning
  * the refcnt counter needs to be atomic_t.
  */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint64_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int id = aspace ? aspace->id : 0;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -338,9 +339,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 }
 
 /* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int id = aspace ? aspace->id : 0;
 	int ret;
 
 	/* this is safe right now because we don't unmap until the
@@ -353,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 	}
 
 	mutex_lock(&obj->dev->struct_mutex);
-	ret = msm_gem_get_iova_locked(obj, id, iova);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
 	return ret;
 }
@@ -361,14 +364,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int id = aspace ? aspace->id : 0;
 	WARN_ON(!msm_obj->domain[id].iova);
 	return msm_obj->domain[id].iova;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
 {
 	// XXX TODO ..
 	// NOTE: probably don't need a _locked() version.. we wouldn't
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 1b4cf20043ea..4b4b352b5718 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -33,6 +33,7 @@ struct msm_gem_address_space {
 	struct drm_mm mm;
 	struct msm_mmu *mmu;
 	struct kref kref;
+	int id; /* temporary */
 };
 
 struct msm_gem_vma {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7832e6421d25..c8d01df993da 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -158,7 +158,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
 
 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->resv->lock);
@@ -246,7 +246,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 
 		/* if locking succeeded, pin bo: */
 		ret = msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
+				submit->gpu->aspace, &iova);
 
 		if (ret)
 			break;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ebbaed442e8a..36f0f1e5fc81 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -416,7 +416,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		/* move to inactive: */
 		msm_gem_move_to_inactive(&msm_obj->base);
-		msm_gem_put_iova(&msm_obj->base, gpu->id);
+		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
 		drm_gem_object_unreference(&msm_obj->base);
 	}
 
@@ -498,7 +498,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 		/* submit takes a reference to the bo and iova until retired: */
 		drm_gem_object_reference(&msm_obj->base);
 		msm_gem_get_iova_locked(&msm_obj->base,
-				submit->gpu->id, &iova);
+				submit->gpu->aspace, &iova);
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
@@ -694,7 +694,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
 
 	if (gpu->rb) {
 		if (gpu->rb_iova)
-			msm_gem_put_iova(gpu->rb->bo, gpu->id);
+			msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
 		msm_ringbuffer_destroy(gpu->rb);
 	}
 
-- 
2.30.2
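For illustration only, a minimal sketch of the call-site migration performed above, assuming the reworked helpers and the gpu->aspace pointer from this patch (it relies on the msm_drv.h declarations changed earlier); example_pin_bo() and example_unpin_bo() are hypothetical wrappers, not part of the series:

/* Hypothetical callers (not in the patch): the iova helpers now take a
 * struct msm_gem_address_space pointer (e.g. gpu->aspace) rather than an
 * integer id; for now msm_gem.c still derives the old index from aspace->id.
 */
static int example_pin_bo(struct msm_gpu *gpu, struct drm_gem_object *bo,
		uint64_t *iova)
{
	/* was: msm_gem_get_iova(bo, gpu->id, iova); */
	return msm_gem_get_iova(bo, gpu->aspace, iova);
}

static void example_unpin_bo(struct msm_gpu *gpu, struct drm_gem_object *bo)
{
	/* was: msm_gem_put_iova(bo, gpu->id); */
	msm_gem_put_iova(bo, gpu->aspace);
}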