struct dma_resv_list *fobj;
struct dma_fence *fence_excl;
__poll_t events;
- unsigned shared_count;
+ unsigned shared_count, seq;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
if (!events)
return 0;
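+ /*
+ * Lockless snapshot of the fences: sample resv->seq, read the
+ * pointers under RCU, then retry if a writer raced with us.
+ */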
+retry:
+ seq = read_seqcount_begin(&resv->seq);
rcu_read_lock();
- dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
+
+ fobj = rcu_dereference(resv->fence);
+ if (fobj)
+ shared_count = fobj->shared_count;
+ else
+ shared_count = 0;
+ fence_excl = rcu_dereference(resv->fence_excl);
+ if (read_seqcount_retry(&resv->seq, seq)) {
+ rcu_read_unlock();
+ goto retry;
+ }
+
if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
__poll_t pevents = EPOLLIN;
struct dma_resv *robj;
struct dma_resv_list *fobj;
struct dma_fence *fence;
+ unsigned seq;
int count = 0, attach_count, shared_count, i;
size_t size = 0;
buf_obj->name ?: "");
robj = buf_obj->resv;
- rcu_read_lock();
- dma_resv_fences(robj, &fence, &fobj, &shared_count);
- rcu_read_unlock();
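+ /*
+ * Unlocked read of the fences; loop until the snapshot is
+ * consistent with any concurrent writer.
+ */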
+ while (true) {
+ seq = read_seqcount_begin(&robj->seq);
+ rcu_read_lock();
+ fobj = rcu_dereference(robj->fence);
+ shared_count = fobj ? fobj->shared_count : 0;
+ fence = rcu_dereference(robj->fence_excl);
+ if (!read_seqcount_retry(&robj->seq, seq))
+ break;
+ rcu_read_unlock();
+ }
if (fence)
seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
+struct lock_class_key reservation_seqcount_class;
+EXPORT_SYMBOL(reservation_seqcount_class);
+
+const char reservation_seqcount_string[] = "reservation_seqcount";
+EXPORT_SYMBOL(reservation_seqcount_string);
+
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
+
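+ /*
+ * Writers are already serialized by obj->lock; the seqcount
+ * only lets lockless readers detect updates and retry.
+ */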
+ __seqcount_init(&obj->seq, reservation_seqcount_string,
+ &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
}
fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
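+ /*
+ * Disabling preemption keeps the write section short, so
+ * readers spinning in read_seqcount_begin() are not held up
+ * across a reschedule.
+ */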
+ preempt_disable();
+ write_seqcount_begin(&obj->seq);
+
for (i = 0; i < count; ++i) {
old = rcu_dereference_protected(fobj->shared[i],
RCU_INIT_POINTER(fobj->shared[i], fence);
/* pointer update must be visible before we extend the shared_count */
smp_store_mb(fobj->shared_count, count);
+
+ write_seqcount_end(&obj->seq);
+ preempt_enable();
dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
dma_fence_get(fence);
preempt_disable();
- rcu_assign_pointer(obj->fence_excl, fence);
- /* pointer update must be visible before we modify the shared_count */
+ write_seqcount_begin(&obj->seq);
+ /* write_seqcount_begin provides the necessary memory barrier */
+ RCU_INIT_POINTER(obj->fence_excl, fence);
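+ /*
+ * Zeroing the count below is ordered against readers by the
+ * seqcount write section, so a plain store replaces the old
+ * smp_store_mb().
+ */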
if (old)
- smp_store_mb(old->shared_count, 0);
+ old->shared_count = 0;
+ write_seqcount_end(&obj->seq);
preempt_enable();
/* inplace update, no shared fences */
{
struct dma_resv_list *src_list, *dst_list;
struct dma_fence *old, *new;
- unsigned int i, shared_count;
+ unsigned i;
dma_resv_assert_held(dst);
rcu_read_lock();
+ src_list = rcu_dereference(src->fence);
retry:
- dma_resv_fences(src, &new, &src_list, &shared_count);
- if (shared_count) {
+ if (src_list) {
+ unsigned shared_count = src_list->shared_count;
+
rcu_read_unlock();
dst_list = dma_resv_list_alloc(shared_count);
return -ENOMEM;
rcu_read_lock();
- dma_resv_fences(src, &new, &src_list, &shared_count);
- if (!src_list || shared_count > dst_list->shared_max) {
+ src_list = rcu_dereference(src->fence);
+ if (!src_list || src_list->shared_count > shared_count) {
kfree(dst_list);
goto retry;
}
dst_list->shared_count = 0;
- for (i = 0; i < shared_count; ++i) {
+ for (i = 0; i < src_list->shared_count; ++i) {
struct dma_fence *fence;
fence = rcu_dereference(src_list->shared[i]);
if (!dma_fence_get_rcu(fence)) {
dma_resv_list_free(dst_list);
+ src_list = rcu_dereference(src->fence);
goto retry;
}
dst_list = NULL;
}
- if (new && !dma_fence_get_rcu(new)) {
- dma_resv_list_free(dst_list);
- goto retry;
- }
+ new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
src_list = dma_resv_get_list(dst);
old = dma_resv_get_excl(dst);
preempt_disable();
- rcu_assign_pointer(dst->fence_excl, new);
- rcu_assign_pointer(dst->fence, dst_list);
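+ /*
+ * Publish both pointers inside one write section so readers
+ * never see a mix of old and new fences.
+ */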
+ write_seqcount_begin(&dst->seq);
+ /* write_seqcount_begin provides the necessary memory barrier */
+ RCU_INIT_POINTER(dst->fence_excl, new);
+ RCU_INIT_POINTER(dst->fence, dst_list);
+ write_seqcount_end(&dst->seq);
preempt_enable();
dma_resv_list_free(src_list);
do {
struct dma_resv_list *fobj;
- unsigned int i;
+ unsigned int i, seq;
size_t sz = 0;
- i = 0;
+ shared_count = i = 0;
rcu_read_lock();
- dma_resv_fences(obj, &fence_excl, &fobj,
- &shared_count);
+ seq = read_seqcount_begin(&obj->seq);
+ fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl && !dma_fence_get_rcu(fence_excl))
goto unlock;
+ fobj = rcu_dereference(obj->fence);
if (fobj)
sz += sizeof(*shared) * fobj->shared_max;
break;
}
shared = nshared;
+ shared_count = fobj ? fobj->shared_count : 0;
for (i = 0; i < shared_count; ++i) {
shared[i] = rcu_dereference(fobj->shared[i]);
if (!dma_fence_get_rcu(shared[i]))
}
}
- if (i != shared_count) {
+ if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
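+ /*
+ * Either a fence reference failed or a writer raced with
+ * us: drop everything and retry the snapshot.
+ */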
while (i--)
dma_fence_put(shared[i]);
dma_fence_put(fence_excl);
bool wait_all, bool intr,
unsigned long timeout)
{
- struct dma_resv_list *fobj;
struct dma_fence *fence;
- unsigned shared_count;
+ unsigned seq, shared_count;
long ret = timeout ? timeout : 1;
int i;
retry:
+ shared_count = 0;
+ seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
i = -1;
- dma_resv_fences(obj, &fence, &fobj, &shared_count);
+ fence = rcu_dereference(obj->fence_excl);
if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (!dma_fence_get_rcu(fence))
goto unlock_retry;
}
if (wait_all) {
+ struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+ if (fobj)
+ shared_count = fobj->shared_count;
+
for (i = 0; !fence && i < shared_count; ++i) {
struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
rcu_read_unlock();
if (fence) {
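+ /*
+ * Check the snapshot is still valid before committing to
+ * the (potentially long) wait.
+ */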
+ if (read_seqcount_retry(&obj->seq, seq)) {
+ dma_fence_put(fence);
+ goto retry;
+ }
+
ret = dma_fence_wait_timeout(fence, intr, ret);
dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
*/
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
- struct dma_resv_list *fobj;
- struct dma_fence *fence_excl;
- unsigned shared_count;
+ unsigned seq, shared_count;
int ret;
rcu_read_lock();
retry:
ret = true;
- dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
+ shared_count = 0;
+ seq = read_seqcount_begin(&obj->seq);
if (test_all) {
unsigned i;
+ struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+
+ if (fobj)
+ shared_count = fobj->shared_count;
+
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
else if (!ret)
break;
}
- }
- if (!shared_count && fence_excl) {
- ret = dma_resv_test_signaled_single(fence_excl);
- if (ret < 0)
+ if (read_seqcount_retry(&obj->seq, seq))
goto retry;
}
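+ /*
+ * Shared fences are expected to signal after the exclusive
+ * fence, so the latter only needs checking when the snapshot
+ * found no shared fences.
+ */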
+ if (!shared_count) {
+ struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+
+ if (fence_excl) {
+ ret = dma_resv_test_signaled_single(fence_excl);
+ if (ret < 0)
+ goto retry;
+
+ if (read_seqcount_retry(&obj->seq, seq))
+ goto retry;
+ }
+ }
+
rcu_read_unlock();
return ret;
}
new->shared_max = old->shared_max;
new->shared_count = k;
- rcu_assign_pointer(resv->fence, new);
+ /* Install the new fence list, seqcount provides the barriers */
+ preempt_disable();
+ write_seqcount_begin(&resv->seq);
+ RCU_INIT_POINTER(resv->fence, new);
+ write_seqcount_end(&resv->seq);
+ preempt_enable();
/* Drop the references to the removed fences or move them to ef_list */
for (i = j, k = 0; i < old->shared_count; ++i) {
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
struct dma_resv_list *list;
- unsigned int i, shared_count;
- struct dma_fence *excl;
+ unsigned int seq;
int err;
err = -ENOENT;
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
- dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
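+ /*
+ * raw_read_seqcount() does not wait for a pending writer; an
+ * inconsistent snapshot is handled by the retry check at the
+ * end, and only when we would report the object as busy.
+ */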
+retry:
+ seq = raw_read_seqcount(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy = busy_check_writer(excl);
+ args->busy =
+ busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
/* Translate shared fences to READ set of engines */
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(list->shared[i]);
- args->busy |= busy_check_reader(fence);
+ list = rcu_dereference(obj->base.resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
+
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
+
+ args->busy |= busy_check_reader(fence);
+ }
}
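+ /* Reporting idle is always safe; busy must be double checked. */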
+ if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
+ goto retry;
+
err = 0;
out:
rcu_read_unlock();
#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
/**
* struct dma_resv_list - a list of shared fences
*/
struct dma_resv {
struct ww_mutex lock;
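+ /* read-side retry protection for the fence pointers below */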
+ seqcount_t seq;
struct dma_fence __rcu *fence_excl;
struct dma_resv_list __rcu *fence;
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
-{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
-}
-
/**
* dma_resv_get_list - get the reservation object's
* shared fence list, with update-side lock held
dma_resv_held(obj));
}
-/**
- * dma_resv_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consisten exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void dma_resv_fences(struct dma_resv *obj,
- struct dma_fence **excl,
- struct dma_resv_list **list,
- u32 *shared_count)
-{
- do {
- *excl = rcu_dereference(obj->fence_excl);
- *list = rcu_dereference(obj->fence);
- *shared_count = *list ? (*list)->shared_count : 0;
- smp_rmb(); /* See dma_resv_add_excl_fence */
- } while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
- struct dma_fence *fence;
-
- if (!rcu_access_pointer(obj->fence_excl))
- return NULL;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&obj->fence_excl);
- rcu_read_unlock();
-
- return fence;
-}
-
/**
* dma_resv_lock - lock the reservation object
* @obj: the reservation object
ww_mutex_unlock(&obj->lock);
}
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any). Does NOT take a
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *
+dma_resv_get_excl(struct dma_resv *obj)
+{
+ return rcu_dereference_protected(obj->fence_excl,
+ dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *
+dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+ struct dma_fence *fence;
+
+ if (!rcu_access_pointer(obj->fence_excl))
+ return NULL;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+ rcu_read_unlock();
+
+ return fence;
+}
+
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);