unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
- struct amdgpu_fence **array,
+ struct fence **array,
uint32_t count,
bool wait_all,
bool intr,
struct amdgpu_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
- struct amdgpu_fence *fence;
+ struct fence *fence;
};
/*
struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
struct amdgpu_semaphore **semaphore,
- struct amdgpu_fence *fence);
+ struct fence *fence);
/*
* Synchronization
int amdgpu_sync_rings(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct amdgpu_fence *fence);
+ struct fence *fence);
/*
* GART structures, functions & helpers
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
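+/* check whether any fence in the array has already signaled */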
-static bool amdgpu_test_signaled_any(struct amdgpu_fence **fences, uint32_t count)
+static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
{
int idx;
- struct amdgpu_fence *fence;
+ struct fence *fence;
for (idx = 0; idx < count; ++idx) {
fence = fences[idx];
if (fence) {
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return true;
}
}
return false;
}
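+/* check whether every fence in the array has signaled (NULL entries are skipped) */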
-static bool amdgpu_test_signaled_all(struct amdgpu_fence **fences, uint32_t count)
+static bool amdgpu_test_signaled_all(struct fence **fences, uint32_t count)
{
int idx;
- struct amdgpu_fence *fence;
+ struct fence *fence;
for (idx = 0; idx < count; ++idx) {
fence = fences[idx];
if (fence) {
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return false;
}
}
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_device *adev = fence->ring->adev;
- return amdgpu_fence_wait_multiple(adev, &fence, 1, false, intr, t);
+ return amdgpu_fence_wait_multiple(adev, &f, 1, false, intr, t);
}
/**
* If wait_all is false, it will return when any fence is signaled or the timeout expires.
*/
signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
- struct amdgpu_fence **array,
+ struct fence **array,
uint32_t count,
bool wait_all,
bool intr,
{
long idx = 0;
struct amdgpu_wait_cb *cb;
- struct amdgpu_fence *fence;
+ struct fence *fence;
BUG_ON(!array);
fence = array[idx];
if (fence) {
cb[idx].task = current;
- if (fence_add_callback(&fence->base,
+ if (fence_add_callback(fence,
&cb[idx].base, amdgpu_fence_wait_cb)) {
/* The fence is already signaled */
if (wait_all)
for (idx = 0; idx < count; ++idx) {
fence = array[idx];
if (fence)
- fence_remove_callback(&fence->base, &cb[idx].base);
+ fence_remove_callback(fence, &cb[idx].base);
}
err_free_cb:
*/
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
{
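+	/* ib->fence is still an amdgpu_fence; hand its embedded struct fence to the free helpers */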
- amdgpu_sync_free(adev, &ib->sync, ib->fence);
- amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+ amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
+ amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
amdgpu_fence_unref(&ib->fence);
}
unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
struct amdgpu_sa_bo **sa_bo,
- struct amdgpu_fence *fence);
+ struct fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
return r;
}
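+/**
+ * amdgpu_sa_get_ring_from_fence - map a fence back to the ring index it belongs to
+ *
+ * The sub-allocator keeps one free list per ring, but a fence protecting an
+ * entry may now be either a hardware fence or a scheduler fence; try both
+ * downcasts and fall back to ring 0 if the fence type is not recognized.
+ */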
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
+ struct amdgpu_fence *a_fence;
+ struct amd_sched_fence *s_fence;
+
+ s_fence = to_amd_sched_fence(f);
+ if (s_fence)
+ return s_fence->entity->scheduler->ring_id;
+ a_fence = to_amdgpu_fence(f);
+ if (a_fence)
+ return a_fence->ring->idx;
+ return 0;
+}
+
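For reference, the NULL checks above rely on downcast helpers that validate a fence's ops pointer and return NULL on a mismatch. The sketch below is an illustrative reconstruction of that pattern, not part of this patch; it assumes amdgpu_fence embeds a struct fence as ->base and that the ops structures are named amdgpu_fence_ops / amd_sched_fence_ops (the real definitions of to_amdgpu_fence() and to_amd_sched_fence() live in the driver and scheduler headers).

/* illustrative sketch only, not added by this patch */
extern const struct fence_ops amdgpu_fence_ops;

static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	/* only downcast if the fence was really created with the amdgpu fence ops */
	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

to_amd_sched_fence() follows the same shape, checking against the scheduler's fence ops before returning the containing amd_sched_fence.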
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
- amdgpu_fence_unref(&sa_bo->fence);
+ fence_put(sa_bo->fence);
kfree(sa_bo);
}
sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL ||
- !fence_is_signaled(&sa_bo->fence->base)) {
+ !fence_is_signaled(sa_bo->fence)) {
return;
}
amdgpu_sa_bo_remove_locked(sa_bo);
}
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_fence **fences,
+ struct fence **fences,
unsigned *tries)
{
struct amdgpu_sa_bo *best_bo = NULL;
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
- if (!fence_is_signaled(&sa_bo->fence->base)) {
+ if (!fence_is_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
}
if (best_bo) {
- ++tries[best_bo->fence->ring->idx];
+ uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+ ++tries[idx];
sa_manager->hole = best_bo->olist.prev;
/* we knew that this one is signaled,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+ struct fence *fences[AMDGPU_MAX_RINGS];
unsigned tries[AMDGPU_MAX_RINGS];
int i, r;
signed long t;
}
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
struct amdgpu_sa_manager *sa_manager;
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
- if (fence && !fence_is_signaled(&fence->base)) {
- (*sa_bo)->fence = amdgpu_fence_ref(fence);
- list_add_tail(&(*sa_bo)->flist,
- &sa_manager->flist[fence->ring->idx]);
+ if (fence && !fence_is_signaled(fence)) {
+ uint32_t idx;
+ (*sa_bo)->fence = fence_get(fence);
+ idx = amdgpu_sa_get_ring_from_fence(fence);
+ list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
amdgpu_sa_bo_remove_locked(*sa_bo);
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
if (i->fence) {
- seq_printf(m, " protected by 0x%016llx on ring %d",
- i->fence->seq, i->fence->ring->idx);
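+		/* the protecting fence may be a HW fence or a scheduler fence; print whichever downcast succeeds */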
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+ if (a_fence)
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ a_fence->seq, a_fence->ring->idx);
+ if (s_fence)
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ s_fence->v_seq,
+ s_fence->entity->scheduler->ring_id);
}
seq_printf(m, "\n");
}
void amdgpu_semaphore_free(struct amdgpu_device *adev,
struct amdgpu_semaphore **semaphore,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
if (semaphore == NULL || *semaphore == NULL) {
return;
*/
void amdgpu_sync_free(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
- struct amdgpu_fence *fence)
+ struct fence *fence)
{
unsigned i;
}
amdgpu_ring_unlock_commit(ring);
- amdgpu_sync_free(adev, &sync, *fence);
+ amdgpu_sync_free(adev, &sync, &(*fence)->base);
return 0;
}