Store the IOMMU mapping created by the device core for each Exynos DRM
sub-device and restore it when the Exynos DRM driver is unbound. This
fixes the IOMMU initialization failure on the second probe attempt when
a deferred probe is triggered from the bind() callback of the master's
compound DRM driver. This also fixes the following leaks reported by the
kmemleak detector:
unreferenced object 0xc2137640 (size 64):
  comm "swapper/0", pid 1, jiffies 4294937900 (age 3127.400s)
  hex dump (first 32 bytes):
    50 a3 14 c2 80 a2 14 c2 01 00 00 00 20 00 00 00  P........... ...
    00 10 00 00 00 80 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<3acd268d>] arch_setup_dma_ops+0x4c/0x104
    [<9f7d2cce>] of_dma_configure+0x19c/0x3a4
    [<ba07704b>] really_probe+0xb0/0x47c
    [<4f510e4f>] driver_probe_device+0x78/0x1c4
    [<7481a0cf>] device_driver_attach+0x58/0x60
    [<0ff8f5c1>] __driver_attach+0xb8/0x158
    [<86006144>] bus_for_each_dev+0x74/0xb4
    [<10159dca>] bus_add_driver+0x1c0/0x200
    [<8a265265>] driver_register+0x74/0x108
    [<e0f3451a>] exynos_drm_init+0xb0/0x134
    [<db3fc7ba>] do_one_initcall+0x90/0x458
    [<6da35917>] kernel_init_freeable+0x188/0x200
    [<db3f74d4>] kernel_init+0x8/0x110
    [<1f3cddf9>] ret_from_fork+0x14/0x20
    [<8cd12507>] 0x0
unreferenced object 0xc214a280 (size 128):
  comm "swapper/0", pid 1, jiffies 4294937900 (age 3127.400s)
  hex dump (first 32 bytes):
    00 a0 ec ed 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<3acd268d>] arch_setup_dma_ops+0x4c/0x104
    [<9f7d2cce>] of_dma_configure+0x19c/0x3a4
    [<ba07704b>] really_probe+0xb0/0x47c
    [<4f510e4f>] driver_probe_device+0x78/0x1c4
    [<7481a0cf>] device_driver_attach+0x58/0x60
    [<0ff8f5c1>] __driver_attach+0xb8/0x158
    [<86006144>] bus_for_each_dev+0x74/0xb4
    [<10159dca>] bus_add_driver+0x1c0/0x200
    [<8a265265>] driver_register+0x74/0x108
    [<e0f3451a>] exynos_drm_init+0xb0/0x134
    [<db3fc7ba>] do_one_initcall+0x90/0x458
    [<6da35917>] kernel_init_freeable+0x188/0x200
    [<db3f74d4>] kernel_init+0x8/0x110
    [<1f3cddf9>] ret_from_fork+0x14/0x20
    [<8cd12507>] 0x0
unreferenced object 0xedeca000 (size 4096):
  comm "swapper/0", pid 1, jiffies 4294937900 (age 3127.400s)
  hex dump (first 32 bytes):
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
  backtrace:
    [<3acd268d>] arch_setup_dma_ops+0x4c/0x104
    [<9f7d2cce>] of_dma_configure+0x19c/0x3a4
    [<ba07704b>] really_probe+0xb0/0x47c
    [<4f510e4f>] driver_probe_device+0x78/0x1c4
    [<7481a0cf>] device_driver_attach+0x58/0x60
    [<0ff8f5c1>] __driver_attach+0xb8/0x158
    [<86006144>] bus_for_each_dev+0x74/0xb4
    [<10159dca>] bus_add_driver+0x1c0/0x200
    [<8a265265>] driver_register+0x74/0x108
    [<e0f3451a>] exynos_drm_init+0xb0/0x134
    [<db3fc7ba>] do_one_initcall+0x90/0x458
    [<6da35917>] kernel_init_freeable+0x188/0x200
    [<db3f74d4>] kernel_init+0x8/0x110
    [<1f3cddf9>] ret_from_fork+0x14/0x20
    [<8cd12507>] 0x0
unreferenced object 0xc214a300 (size 128):
  comm "swapper/0", pid 1, jiffies 4294937900 (age 3127.400s)
  hex dump (first 32 bytes):
    00 a3 14 c2 00 a3 14 c2 00 40 18 c2 00 80 18 c2  .........@......
    02 00 02 00 ad 4e ad de ff ff ff ff ff ff ff ff  .....N..........
  backtrace:
    [<08cbd8bc>] iommu_domain_alloc+0x24/0x50
    [<b835abee>] arm_iommu_create_mapping+0xe4/0x134
    [<3acd268d>] arch_setup_dma_ops+0x4c/0x104
    [<9f7d2cce>] of_dma_configure+0x19c/0x3a4
    [<ba07704b>] really_probe+0xb0/0x47c
    [<4f510e4f>] driver_probe_device+0x78/0x1c4
    [<7481a0cf>] device_driver_attach+0x58/0x60
    [<0ff8f5c1>] __driver_attach+0xb8/0x158
    [<86006144>] bus_for_each_dev+0x74/0xb4
    [<10159dca>] bus_add_driver+0x1c0/0x200
    [<8a265265>] driver_register+0x74/0x108
    [<e0f3451a>] exynos_drm_init+0xb0/0x134
    [<db3fc7ba>] do_one_initcall+0x90/0x458
    [<6da35917>] kernel_init_freeable+0x188/0x200
    [<db3f74d4>] kernel_init+0x8/0x110
    [<1f3cddf9>] ret_from_fork+0x14/0x20
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
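In short, the attach path now saves the mapping that the device core set up
for the sub-device before replacing it with the common Exynos DRM mapping,
and the detach path puts the original mapping back. A condensed sketch of
that flow, simplified from the exynos_drm_dma.c hunks in the diff below
(error handling, max-segment-size setup and the CONFIG_IOMMU_DMA path are
omitted here):

#include <linux/device.h>
#include <asm/dma-iommu.h>	/* arm_iommu_{attach,detach}_device(), to_dma_iommu_mapping() */

static int drm_iommu_attach_device(struct drm_device *drm_dev,
				   struct device *subdrv_dev, void **dma_priv)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		/*
		 * Remember the mapping created by the device core so it can
		 * be restored on detach; otherwise a later (deferred) probe
		 * sees the device as IOMMU-less.
		 */
		*dma_priv = to_dma_iommu_mapping(subdrv_dev);
		if (*dma_priv)
			arm_iommu_detach_device(subdrv_dev);

		return arm_iommu_attach_device(subdrv_dev, priv->mapping);
	}

	return 0;
}

static void drm_iommu_detach_device(struct drm_device *drm_dev,
				    struct device *subdrv_dev, void **dma_priv)
{
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		arm_iommu_detach_device(subdrv_dev);
		/* Re-attach the mapping saved at bind time. */
		arm_iommu_attach_device(subdrv_dev, *dma_priv);
	}
}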
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
decon_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_atomic_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static const struct component_ops decon_component_ops = {
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
decon_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
* mapping.
*/
static int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
+ struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
return ret;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
- if (to_dma_iommu_mapping(subdrv_dev))
+ /*
+ * Keep the original DMA mapping of the sub-device and
+ * restore it on Exynos DRM detach, otherwise the DMA
+ * framework considers it as IOMMU-less during the next
+ * probe (in case of deferred probe or modular build)
+ */
+ *dma_priv = to_dma_iommu_mapping(subdrv_dev);
+ if (*dma_priv)
arm_iommu_detach_device(subdrv_dev);
ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
* mapping
*/
static void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
+ struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
arm_iommu_detach_device(subdrv_dev);
- else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ arm_iommu_attach_device(subdrv_dev, *dma_priv);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
iommu_detach_device(priv->mapping, subdrv_dev);
clear_dma_max_seg_size(subdrv_dev);
}
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv)
{
struct exynos_drm_private *priv = drm->dev_private;
priv->mapping = mapping;
}
- return drm_iommu_attach_device(drm, dev);
+ return drm_iommu_attach_device(drm, dev, dma_priv);
}
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv)
{
if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
- drm_iommu_detach_device(drm, dev);
+ drm_iommu_detach_device(drm, dev, dma_priv);
}
void exynos_drm_cleanup_dma(struct drm_device *drm)
return priv->mapping ? true : false;
}
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+ void **dma_priv);
void exynos_drm_cleanup_dma(struct drm_device *drm);
#ifdef CONFIG_DRM_EXYNOS_DPI
struct fimc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
ctx->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops fimc_component_ops = {
struct fimd_context {
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[WINDOWS_NR];
struct exynos_drm_plane_config configs[WINDOWS_NR];
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return exynos_drm_register_dma(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}
static void fimd_unbind(struct device *dev, struct device *master,
fimd_atomic_disable(ctx->crtc);
- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
struct g2d_data {
struct device *dev;
+ void *dma_priv;
struct clk *gate_clk;
void __iomem *regs;
int irq;
return ret;
}
- ret = exynos_drm_register_dma(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_unregister_dma(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
static const struct component_ops g2d_component_ops = {
struct gsc_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
struct exynos_drm_ipp_task *task;
struct exynos_drm_ipp_formats *formats;
ctx->drm_dev = drm_dev;
ctx->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
}
static const struct component_ops gsc_component_ops = {
struct rot_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock;
rot->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
}
static const struct component_ops rotator_component_ops = {
struct scaler_context {
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
+ void *dma_priv;
struct device *dev;
void __iomem *regs;
struct clk *clock[SCALER_MAX_CLK];
scaler->drm_dev = drm_dev;
ipp->drm_dev = drm_dev;
- exynos_drm_register_dma(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(dev, ipp);
- exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
+ &scaler->dma_priv);
}
static const struct component_ops scaler_component_ops = {
struct platform_device *pdev;
struct device *dev;
struct drm_device *drm_dev;
+ void *dma_priv;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;
}
}
- return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
+ &mixer_ctx->dma_priv);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
+ &mixer_ctx->dma_priv);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
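Every sub-driver touched above follows the same pattern: a void *dma_priv
member is added to its context structure and its address is threaded through
exynos_drm_register_dma()/exynos_drm_unregister_dma(). A minimal illustration
of that pattern as a hypothetical component (foo_context, foo_bind and
foo_unbind are placeholders, not part of the patch):

#include <linux/component.h>
#include <linux/device.h>
#include <drm/drm_device.h>
#include "exynos_drm_drv.h"	/* exynos_drm_{register,unregister}_dma() */

struct foo_context {
	struct device *dev;
	struct drm_device *drm_dev;
	void *dma_priv;		/* mapping saved by exynos_drm_register_dma() */
};

static int foo_bind(struct device *dev, struct device *master, void *data)
{
	struct foo_context *ctx = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;

	ctx->dev = dev;
	ctx->drm_dev = drm_dev;

	/* The DMA helpers stash the original per-device mapping in ctx->dma_priv. */
	return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	struct foo_context *ctx = dev_get_drvdata(dev);

	/* Restores the mapping saved at bind time before dropping the DRM one. */
	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}

static const struct component_ops foo_component_ops = {
	.bind	= foo_bind,
	.unbind	= foo_unbind,
};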