uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
bool wake = false;
+ unsigned long irqflags;
/* Note there is a scenario here for an infinite loop, but it is
 * very unlikely to happen. For it to happen, the polling path
 * must have temporarily set last_seq not to the true last
 * seq but to an older one.
 */
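+ /* Fence processing can run from the ring's EOP interrupt handler, so the
+  * new per-ring fence_lock is taken with interrupts saved and disabled. */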
+ spin_lock_irqsave(&ring->fence_lock, irqflags);
last_seq = atomic64_read(&ring->fence_drv.last_seq);
do {
last_emitted = ring->fence_drv.sync_seq[ring->idx];
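/* "EOP without seq update": the interrupt fired, but the handled sequence
 * already matches the latest sequence, so nothing has actually advanced. */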
if (handled_seq == latest_seq) {
DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n",
ring->idx, latest_seq);
- return;
+ goto exit;
}
/* Drive the scheduler ISR until it has handled everything up to
 * latest_seq, then wake anyone waiting on the fence queue. */
do {
	amd_sched_isr(ring->scheduler);
} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
wake_up_all(&ring->adev->fence_queue);
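+ /* The normal path and the early-error goto above both converge here, so
+  * the lock is always released. */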
+exit:
+ spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}
}
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
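+ /* Initialize the new fence_lock before amdgpu_fence_driver_start_ring()
+  * hooks up the fence interrupt source for this ring. */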
+ spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r);