*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
/* set to DX10/11 mode */
radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
static int evergreen_cp_start(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
int evergreen_cp_resume(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
static int evergreen_startup(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
int evergreen_suspend(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
u32 sync_type, u32 size,
u64 mc_addr)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
static void
set_shaders(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
/* VS */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
/* high addr, stride */
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
/* workaround some hw bugs */
if (x2 == 0)
x1 = 1;
static void
draw_auto(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(cp, DI_PT_RECTLIST);
static void
set_default_state(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
static int cayman_cp_start(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
r = radeon_ring_lock(rdev, cp, 7);
static void cayman_cp_fini(struct radeon_device *rdev)
{
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->cp);
+ radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
}
int cayman_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
- cp = &rdev->cp;
+ cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
/* ring 1 - compute only */
/* Set ring buffer size */
- cp = &rdev->cp1;
+ cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
/* ring 2 - compute only */
/* Set ring buffer size */
- cp = &rdev->cp2;
+ cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
rb_bufsz = drm_order(cp->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
/* start the rings */
cayman_cp_start(rdev);
- rdev->cp.ready = true;
- rdev->cp1.ready = true;
- rdev->cp2.ready = true;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+ rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+ rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
/* this only tests cp0 */
- r = radeon_ring_test(rdev, &rdev->cp);
+ r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
- rdev->cp.ready = false;
- rdev->cp1.ready = false;
- rdev->cp2.ready = false;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
return r;
}
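
With the ready flags now living in an array, the per-ring assignments above could equally be written as a loop; a minimal sketch of such a helper (not part of this patch; cayman_cp_set_ready is a hypothetical name):

static void cayman_cp_set_ready(struct radeon_device *rdev, bool ready)
{
	int i;

	/* toggle every ring in rdev->cp[] at once */
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->cp[i].ready = ready;
}
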
static int cayman_startup(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
{
/* FIXME: we should wait for ring to be empty */
cayman_cp_enable(rdev, false);
- rdev->cp.ready = false;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
*/
int cayman_init(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* This doesn't do much */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[fence->ring];
/* We have to make sure that caches are flushed before
 * the CPU might read something from VRAM. */
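
The fence-indexed lookups here and below (rdev->cp[fence->ring], rdev->cp[ib->fence->ring]) assume struct radeon_fence now records which ring it was emitted on, roughly as follows (a sketch; the real field layout is abbreviated):

struct radeon_fence {
	struct radeon_device *rdev;
	/* ... existing fields ... */
	int ring; /* assumed: index into rdev->cp[] this fence belongs to */
};
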
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
void r100_ring_start(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
r = radeon_ring_lock(rdev, cp, 2);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
}
/* Disable ring */
r100_cp_disable(rdev);
- radeon_ring_fini(rdev, &rdev->cp);
+ radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
{
/* Disable ring */
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- rdev->cp.ready = false;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(cp, ib->gpu_addr);
/* Shutdown the CP; we shouldn't need to do this, but better
 * safe than sorry.
 */
- rdev->cp.ready = false;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
/* Save few CRTC registers */
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[fence->ring];
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today callers are ib schedule and buffer move) */
void r300_ring_start(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
unsigned gb_tile_config;
int r;
static void r420_cp_errata_init(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
int r600_cp_start(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
int r600_cp_resume(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->cp);
+ radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
}
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
+ unsigned i, ridx = radeon_ring_index(rdev, cp);
int r;
r = radeon_scratch_get(rdev, &scratch);
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, cp, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %p (%d).\n", cp, r);
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %p succeeded in %d usecs\n", cp, i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
} else {
- DRM_ERROR("radeon: ring %p test failed (scratch(0x%04X)=0x%08X)\n",
- cp, scratch, tmp);
+ DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ridx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[fence->ring];
if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
int r600_startup(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
/* FIXME: implement */
radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
unsigned count, i, j;
radeon_ring_free_size(rdev, cp);
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
u32 sync_type, u32 size,
u64 mc_addr)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
static void
set_shaders(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
if (h < 1)
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
static void
draw_auto(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(cp, DI_PT_RECTLIST);
static void
set_default_state(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size;
int num_loops = 0;
if (fence)
r = radeon_fence_emit(rdev, fence);
- radeon_ring_unlock_commit(rdev, &rdev->cp);
+ radeon_ring_unlock_commit(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
struct radeon_semaphore_driver semaphore_drv;
- struct radeon_cp cp;
- struct radeon_cp cp1;
- struct radeon_cp cp2;
+ struct radeon_cp cp[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
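
The ring indices used throughout this patch are assumed to be defined in radeon.h along these lines (a sketch; the values match the three-element array above, but the exact surrounding definitions may differ):

/* assumed internal ring indices */
#define RADEON_RING_TYPE_GFX_INDEX	0	/* gfx CP ring, all asics */
#define CAYMAN_RING_TYPE_CP1_INDEX	1	/* cayman compute ring 1 */
#define CAYMAN_RING_TYPE_CP2_INDEX	2	/* cayman compute ring 2 */
#define RADEON_NUM_RINGS		3	/* size of rdev->cp[] */
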
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
- mutex_init(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ mutex_init(&rdev->cp[i].mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
return 0;
}
fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
- if (!rdev->cp.ready)
+ if (!rdev->cp[fence->ring].ready)
/* FIXME: cp is not running, assume everything is done right
 * away
 */
* if we are experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv[fence->ring].last_seq &&
- radeon_gpu_is_lockup(rdev, &rdev->cp)) {
+ radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
+ unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
- args->gart_size -= rdev->cp.ring_size;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ args->gart_size -= rdev->cp[i].ring_size;
return 0;
}
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- if (rdev->cp.ring_obj)
- mutex_lock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->cp[i].ring_obj)
+ mutex_lock(&rdev->cp[i].mutex);
+ }
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
radeon_irq_set(rdev);
}
} else {
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
if (cp->ready) {
struct radeon_fence *fence;
radeon_ring_alloc(rdev, cp, 64);
- radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
radeon_fence_emit(rdev, fence);
radeon_ring_commit(rdev, cp);
radeon_fence_wait(fence, false);
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- if (rdev->cp.ring_obj)
- mutex_unlock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->cp[i].ring_obj)
+ mutex_unlock(&rdev->cp[i].mutex);
+ }
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
int r = 0;
if (!ib->length_dw || !cp->ready) {
/*
* Ring.
*/
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
+{
+ /* r1xx-r5xx only have the CP ring */
+ if (rdev->family < CHIP_R600)
+ return RADEON_RING_TYPE_GFX_INDEX;
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
+ return CAYMAN_RING_TYPE_CP1_INDEX;
+ else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
+ return CAYMAN_RING_TYPE_CP2_INDEX;
+ }
+ return RADEON_RING_TYPE_GFX_INDEX;
+}
+
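
A caller holding only a ring pointer can recover its index with the helper above; a usage sketch mirroring the power-management hunk earlier (assumes cp was already locked by the caller):

/* create a fence on whichever ring 'cp' refers to */
struct radeon_fence *fence;
int r = radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
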
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
{
if (rdev->wb.enabled)
if (ndw < cp->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
if (r)
return r;
}
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
- radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, false);
+ radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
- radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, true);
+ radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, true);
}
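
With the ring index passed through explicitly, cross-ring synchronization becomes expressible; a usage sketch (assumes 'sem' was obtained from the semaphore driver beforehand):

/* make the CP1 compute ring wait until the gfx ring signals 'sem' */
radeon_semaphore_emit_signal(rdev, RADEON_RING_TYPE_GFX_INDEX, sem);
radeon_semaphore_emit_wait(rdev, CAYMAN_RING_TYPE_CP1_INDEX, sem);
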
void radeon_semaphore_free(struct radeon_device *rdev,
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
- n -= rdev->cp.ring_size;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ n -= rdev->cp[i].ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->cp.ready == false)
+ if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
- if (!rdev->cp.ready) {
+ if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) {
DRM_ERROR("Trying to move memory with CP turned off.\n");
return -EINVAL;
}
radeon_move_null(bo, new_mem);
return 0;
}
- if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+ if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
/* use memcpy */
goto memcpy;
}
void rv515_ring_start(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
r = radeon_ring_lock(rdev, cp, 64);
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->cp);
+ radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
}
/*
static int rv770_startup(struct radeon_device *rdev)
{
- struct radeon_cp *cp = &rdev->cp;
+ struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+ rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);