From 4c266edb4c98e929ea8871525fa2c7bd2483e05f Mon Sep 17 00:00:00 2001
From: Joonas Lahtinen
Date: Thu, 24 Nov 2016 14:47:49 +0000
Subject: [PATCH] drm/i915: Rename i915_gem_timeline.next_seqno to .seqno

Rename i915_gem_timeline member 'next_seqno' into 'seqno' as the
variable is pre-incremented. We've already had two bugs due to the
confusing name; the second is fixed by a follow-up patch.

Cc: Chris Wilson
Signed-off-by: Joonas Lahtinen
Reviewed-by: Chris Wilson
Signed-off-by: Chris Wilson
Link: http://patchwork.freedesktop.org/patch/msgid/20161124144750.2610-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c      |  4 ++--
 drivers/gpu/drm/i915/i915_gem_request.c  | 14 +++++++-------
 drivers/gpu/drm/i915/i915_gem_timeline.h |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0c6323e0f9f9..66067c439935 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -552,7 +552,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
 				   engine->name,
 				   work->flip_queued_req->global_seqno,
-				   atomic_read(&dev_priv->gt.global_timeline.next_seqno),
+				   atomic_read(&dev_priv->gt.global_timeline.seqno),
 				   intel_engine_get_seqno(engine),
 				   i915_gem_request_completed(work->flip_queued_req));
 		} else
@@ -1026,7 +1026,7 @@ i915_next_seqno_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
 
-	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
+	*val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 9f37eaa3723a..82904595eaae 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -330,11 +330,11 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 	GEM_BUG_ON(i915->gt.active_requests > 1);
 
 	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+	if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
 		while (intel_breadcrumbs_busy(i915))
 			cond_resched(); /* spin until threads are complete */
 	}
-	atomic_set(&timeline->next_seqno, seqno);
+	atomic_set(&timeline->seqno, seqno);
 
 	/* Finally reset hw state */
 	for_each_engine(engine, i915, id)
@@ -369,11 +369,11 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 static int reserve_global_seqno(struct drm_i915_private *i915)
 {
 	u32 active_requests = ++i915->gt.active_requests;
-	u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+	u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
 	int ret;
 
 	/* Reservation is fine until we need to wrap around */
-	if (likely(next_seqno + active_requests > next_seqno))
+	if (likely(seqno + active_requests > seqno))
 		return 0;
 
 	ret = i915_gem_init_global_seqno(i915, 0);
@@ -387,13 +387,13 @@ static int reserve_global_seqno(struct drm_i915_private *i915)
 
 static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
 {
-	/* next_seqno only incremented under a mutex */
-	return ++tl->next_seqno.counter;
+	/* seqno only incremented under a mutex */
+	return ++tl->seqno.counter;
 }
 
 static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
 {
-	return atomic_inc_return(&tl->next_seqno);
+	return atomic_inc_return(&tl->seqno);
 }
 
 void __i915_gem_request_submit(struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 98d99a62b4ae..f2e51f42cc2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -56,7 +56,7 @@ struct intel_timeline {
 
 struct i915_gem_timeline {
 	struct list_head link;
-	atomic_t next_seqno;
+	atomic_t seqno;
 
 	struct drm_i915_private *i915;
 	const char *name;
-- 
2.30.2
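
The rename hinges on two details visible in the diff: the field stores the seqno most recently allocated (it is pre-incremented on allocation, so 'next_seqno' was misleading), and reserve_global_seqno() relies on an unsigned-overflow test to detect an impending wrap. The small standalone sketch below models both behaviours; it is plain C, not i915 code, and every name in it (fake_timeline, fake_get_seqno, fake_would_wrap) is hypothetical.

/*
 * Illustrative only -- not i915 code. A tiny userspace model of the
 * pre-increment seqno convention and of the unsigned wrap check used in
 * reserve_global_seqno(). All names here are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_timeline {
	uint32_t seqno;		/* last seqno handed out, not the next one */
};

/* Pre-increment allocation, in the style of timeline_get_seqno(). */
static uint32_t fake_get_seqno(struct fake_timeline *tl)
{
	return ++tl->seqno;
}

/*
 * Wrap test in the spirit of reserve_global_seqno(): if adding the number
 * of pending requests to the current seqno overflows u32, the unsigned sum
 * is no longer greater than the starting value.
 */
static int fake_would_wrap(uint32_t seqno, uint32_t active_requests)
{
	return seqno + active_requests <= seqno;
}

int main(void)
{
	struct fake_timeline tl = { .seqno = 0 };
	uint32_t allocated = fake_get_seqno(&tl);

	/* The field now holds the value just allocated, hence "seqno". */
	printf("allocated %u, field holds %u\n", allocated, tl.seqno);

	/* Near the 32-bit limit the reservation check detects a wrap. */
	tl.seqno = UINT32_MAX - 1;
	printf("wrap with 3 pending requests? %d\n",
	       fake_would_wrap(tl.seqno, 3));
	return 0;
}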