From: Christian König
Date: Thu, 13 Oct 2011 11:19:22 +0000 (+0200)
Subject: drm/radeon: make cp variable an array
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=bf85279958da96cb4b11aac89b34f0424c3c120e;p=openwrt%2Fstaging%2Fblogic.git

drm/radeon: make cp variable an array

Replace cp, cp1 and cp2 members with just an array of radeon_cp structs.

Signed-off-by: Christian König
Reviewed-by: Jerome Glisse
Signed-off-by: Dave Airlie
---
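For reference only, not part of the patch itself: a rough sketch of the access
pattern after this change. RADEON_NUM_RINGS and the ring index defines are the
ones used in radeon.h below; the snippet is illustrative, not code from the tree.

    /* before: one field per ring in struct radeon_device */
    struct radeon_cp cp;
    struct radeon_cp cp1;
    struct radeon_cp cp2;

    /* after: a single array, indexed by ring */
    struct radeon_cp cp[RADEON_NUM_RINGS];

    /* callers pick a ring by index ... */
    struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];

    /* ... and per-ring setup can loop instead of repeating code */
    for (i = 0; i < RADEON_NUM_RINGS; ++i)
            mutex_init(&rdev->cp[i].mutex);
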
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d1264a7154a6..cb198aca9f5a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1311,7 +1311,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 
 	/* set to DX10/11 mode */
 	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1362,7 +1362,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;
 
@@ -1428,7 +1428,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -3056,7 +3056,7 @@ restart_ih:
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -3168,7 +3168,7 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
@@ -3251,8 +3251,8 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 75d0a6f0a395..56f5d92cce24 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,7 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
 
@@ -88,7 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -116,7 +116,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 
 	/* VS */
@@ -144,7 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
 	/* high addr, stride */
@@ -189,7 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
@@ -230,7 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	/* workaround some hw bugs */
 	if (x2 == 0)
 		x1 = 1;
@@ -261,7 +261,7 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
 	radeon_ring_write(cp, DI_PT_RECTLIST);
@@ -286,7 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cc9aaeb104f5..2d809e62c4c6 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1049,7 +1049,7 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 
 	r = radeon_ring_lock(rdev, cp, 7);
@@ -1116,7 +1116,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
@@ -1147,7 +1147,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	cp = &rdev->cp;
+	cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1181,7 +1181,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp1;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1207,7 +1207,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp2;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1233,15 +1233,15 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp.ready = true;
-	rdev->cp1.ready = true;
-	rdev->cp2.ready = true;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev, &rdev->cp);
+	r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp.ready = false;
-		rdev->cp1.ready = false;
-		rdev->cp2.ready = false;
+		rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}
 
@@ -1343,7 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1438,7 +1438,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	cayman_cp_enable(rdev, false);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -1455,7 +1455,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* This don't do much */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 6c328115e66c..6ca20d7bf626 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -811,7 +811,7 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 
 	/* We have to make sure that caches are flushed before
	 * CPU might read something from VRAM. */
@@ -849,7 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t cur_pages;
 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
@@ -934,7 +934,7 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 
 void r100_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	r = radeon_ring_lock(rdev, cp, 2);
@@ -1048,7 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned rb_bufsz;
 	unsigned rb_blksz;
 	unsigned max_fetch;
@@ -1162,7 +1162,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 	}
 	/* Disable ring */
 	r100_cp_disable(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	DRM_INFO("radeon: cp finalized\n");
 }
 
@@ -1170,7 +1170,7 @@ void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
 	WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -2587,7 +2587,7 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t rdp, wdp;
 	unsigned count, i, j;
 
@@ -3686,7 +3686,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1));
 	radeon_ring_write(cp, ib->gpu_addr);
@@ -3778,7 +3778,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
 	/* Shutdown CP we shouldn't need to do that but better be safe than
 	 * sorry
 	 */
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(R_000740_CP_CSQ_CNTL, 0);
 
 	/* Save few CRTC registers */
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d84e633f72fc..d59c727a8e0f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,7 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t size;
 	uint32_t cur_size;
 	int i, num_loops;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cbb62fc3f2e9..66ff35f394ce 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -175,7 +175,7 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
 void r300_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 
 	/* Who ever call radeon_fence_emit should call ring_lock and ask
 	 * for enough space (today caller are ib schedule and buffer move) */
@@ -208,7 +208,7 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 
 void r300_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned gb_tile_config;
 	int r;
 
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4c0af4955f08..62e860436999 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,7 +199,7 @@ static void r420_clock_resume(struct radeon_device *rdev)
 
 static void r420_cp_errata_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* RV410 and R420 can lock up if CP DMA to host memory happens
 	 * while the 2D engine is busy.
@@ -217,7 +217,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* Catch the RESYNC we dispatched all the way back,
 	 * at the very beginning of the CP init.
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 599753176741..aaf8cd42943e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2144,7 +2144,7 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 int r600_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	uint32_t cp_me;
 
@@ -2174,7 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
 
 int r600_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -2248,7 +2248,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned r
 void r600_cp_fini(struct radeon_device *rdev)
 {
 	r600_cp_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 
@@ -2271,7 +2271,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i;
+	unsigned i, ridx = radeon_ring_index(rdev, cp);
 	int r;
 
 	r = radeon_scratch_get(rdev, &scratch);
@@ -2282,7 +2282,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 	WREG32(scratch, 0xCAFEDEAD);
 	r = radeon_ring_lock(rdev, cp, 3);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring %p (%d).\n", cp, r);
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
@@ -2297,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test on %p succeeded in %d usecs\n", cp, i);
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
 	} else {
-		DRM_ERROR("radeon: ring %p test failed (scratch(0x%04X)=0x%08X)\n",
-			  cp, scratch, tmp);
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ridx, scratch, tmp);
 		r = -EINVAL;
 	}
 	radeon_scratch_free(rdev, scratch);
@@ -2310,7 +2310,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
@@ -2420,7 +2420,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 int r600_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -2534,7 +2534,7 @@ int r600_suspend(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -2609,8 +2609,8 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -2677,7 +2677,7 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 
 	/* FIXME: implement */
 	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -3518,7 +3518,7 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned count, i, j;
 
 	radeon_ring_free_size(rdev, cp);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 39ae19d38c2f..62dd1c281c76 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -50,7 +50,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
 
@@ -104,7 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -123,7 +123,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 	u32 sq_pgm_resources;
 
@@ -170,7 +170,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -207,7 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
 	if (h < 1)
@@ -246,7 +246,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
 	radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
@@ -267,7 +267,7 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	radeon_ring_write(cp, DI_PT_RECTLIST);
@@ -292,7 +292,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
 	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
 	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -687,7 +687,7 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
 
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	int ring_size;
 	int num_loops = 0;
@@ -727,7 +727,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
 
 	if (fence)
 		r = radeon_fence_emit(rdev, fence);
-	radeon_ring_unlock_commit(rdev, &rdev->cp);
+	radeon_ring_unlock_commit(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 void r600_kms_blit_copy(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index bbe88ec3951d..76c58e9e4778 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -547,6 +547,7 @@ struct r600_ih {
 	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr;
+	unsigned		rptr_offs;
 	unsigned		wptr;
 	unsigned		wptr_old;
 	unsigned		ring_size;
@@ -598,6 +599,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_test(struct radeon_device *rdev);
 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp);
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
@@ -1284,9 +1286,7 @@ struct radeon_device {
 	rwlock_t			fence_lock;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
 	struct radeon_semaphore_driver	semaphore_drv;
-	struct radeon_cp		cp;
-	struct radeon_cp		cp1;
-	struct radeon_cp		cp2;
+	struct radeon_cp		cp[RADEON_NUM_RINGS];
 	struct radeon_ib_pool		ib_pool;
 	struct radeon_irq		irq;
 	struct radeon_asic		*asic;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 36296ad397a1..023c156eddd0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -718,7 +718,8 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
-	mutex_init(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		mutex_init(&rdev->cp[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
 	if (rdev->family >= CHIP_R600)
 		spin_lock_init(&rdev->ih.lock);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9ed0bb100bcb..86f4eeaeba66 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -84,7 +84,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 		return 0;
 	}
 	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
-	if (!rdev->cp.ready)
+	if (!rdev->cp[fence->ring].ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
@@ -269,7 +269,7 @@ retry:
 		 * if we experiencing a lockup the value doesn't change
 		 */
 		if (seq == rdev->fence_drv[fence->ring].last_seq &&
-		    radeon_gpu_is_lockup(rdev, &rdev->cp)) {
+		    radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
 			/* good news we believe it's a lockup */
 			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
 			       fence->seq, seq);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 136772ccfe72..1ce8fa71cf73 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -152,6 +152,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
@@ -161,7 +162,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
 	args->vram_visible -= radeon_fbdev_total_size(rdev);
 	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
-	args->gart_size -= rdev->cp.ring_size;
+	for(i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->cp[i].ring_size;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 73b6714d615b..50b632ac8237 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,8 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
 	mutex_lock(&rdev->ddev->struct_mutex);
 	mutex_lock(&rdev->vram_mutex);
-	if (rdev->cp.ring_obj)
-		mutex_lock(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (rdev->cp[i].ring_obj)
+			mutex_lock(&rdev->cp[i].mutex);
+	}
 
 	/* gui idle int has issues on older chips it seems */
 	if (rdev->family >= CHIP_R600) {
@@ -269,11 +271,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 			radeon_irq_set(rdev);
 		}
 	} else {
-		struct radeon_cp *cp = &rdev->cp;
+		struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 		if (cp->ready) {
 			struct radeon_fence *fence;
 			radeon_ring_alloc(rdev, cp, 64);
-			radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+			radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
 			radeon_fence_emit(rdev, fence);
 			radeon_ring_commit(rdev, cp);
 			radeon_fence_wait(fence, false);
@@ -309,8 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
-	if (rdev->cp.ring_obj)
-		mutex_unlock(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (rdev->cp[i].ring_obj)
+			mutex_unlock(&rdev->cp[i].mutex);
+	}
 	mutex_unlock(&rdev->vram_mutex);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 209834dbf18f..d96b13af5e9d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -178,7 +178,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 	int r = 0;
 
 	if (!ib->length_dw || !cp->ready) {
@@ -284,6 +284,21 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 /*
  * Ring.
  */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
+{
+	/* r1xx-r5xx only has CP ring */
+	if (rdev->family < CHIP_R600)
+		return RADEON_RING_TYPE_GFX_INDEX;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
+			return CAYMAN_RING_TYPE_CP1_INDEX;
+		else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
+			return CAYMAN_RING_TYPE_CP2_INDEX;
+	}
+	return RADEON_RING_TYPE_GFX_INDEX;
+}
+
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	if (rdev->wb.enabled)
@@ -312,7 +327,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned
 		if (ndw < cp->ring_free_dw) {
 			break;
 		}
-		r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 064694a67824..bf4789eed0b2 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -121,13 +121,13 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
 				  struct radeon_semaphore *semaphore)
 {
-	radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, false);
+	radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, false);
 }
 
 void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 				struct radeon_semaphore *semaphore)
 {
-	radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, true);
+	radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, true);
 }
 
 void radeon_semaphore_free(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index ee6c160ffae9..160e7df77551 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -43,7 +43,8 @@ void radeon_test_moves(struct radeon_device *rdev)
 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
 	 */
 	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
-	n -= rdev->cp.ring_size;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		n -= rdev->cp[i].ring_size;
 	if (rdev->wb.wb_obj)
 		n -= RADEON_GPU_PAGE_SIZE;
 	if (rdev->ih.ring_obj)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 112ecaa6362b..0be15bf38d3c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -188,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->rdev->cp.ready == false)
+		if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false)
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 		else
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
-	if (!rdev->cp.ready) {
+	if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) {
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
@@ -380,7 +380,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 		radeon_move_null(bo, new_mem);
 		return 0;
 	}
-	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+	if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
 		/* use memcpy */
 		goto memcpy;
 	}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8fe13ba8143a..8a935987d022 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -55,7 +55,7 @@ void rv515_debugfs(struct radeon_device *rdev)
 
 void rv515_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	r = radeon_ring_lock(rdev, cp, 64);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a2c60598d0f7..9e4c0418f54d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 void r700_cp_fini(struct radeon_device *rdev)
 {
 	r700_cp_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 /*
@@ -1043,7 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1144,7 +1144,7 @@ int rv770_suspend(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
@@ -1217,8 +1217,8 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);