dma_fence_init(&clflush->dma,
&i915_clflush_ops,
&clflush_lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
+ 0, 0);
i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
clflush->obj = i915_gem_object_get(obj);
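The fourth argument of dma_fence_init() is the fence context, i.e. the timeline the fence belongs to. Since the dma-fence core began handing contexts out starting from 1 (reserving 0 for its always-signalled stub fence), the literal 0 can stand in for "no timeline at all", which is exactly what the per-device unordered_timeline cookie used to express. A minimal sketch of the new idiom, using hypothetical example_* names:

```c
#include <linux/dma-fence.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_fence_lock);	/* hypothetical lock */

static const char *example_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_timeline_name(struct dma_fence *fence)
{
	return "unordered";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_driver_name,
	.get_timeline_name = example_timeline_name,
};

/* Context 0, seqno 0: the fence is ordered against nothing, matching
 * what the removed mm.unordered_timeline cookie used to signal. */
static void example_init_unordered(struct dma_fence *fence)
{
	dma_fence_init(fence, &example_fence_ops, &example_fence_lock, 0, 0);
}
```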
struct i915_page_sizes *page_sizes,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
- dma_fence_init(&work->dma,
- &clear_pages_work_ops,
- &fence_lock,
- i915->mm.unordered_timeline,
- 0);
+ dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
i915_gem_object_lock(obj);
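The clear-pages worker gets the same substitution; the `i915` local existed only to reach mm.unordered_timeline, so it disappears with it. For contrast, a sketch of what a driver still does when it wants its fences ordered against one another (example_driver and its field are hypothetical):

```c
#include <linux/dma-fence.h>

/* Hypothetical driver state; 'timeline' mirrors how i915 used to keep
 * its cookie in struct i915_gem_mm. */
struct example_driver {
	u64 timeline;
};

static void example_driver_init(struct example_driver *drv)
{
	/* dma_fence_context_alloc() never returns 0, so an allocated
	 * context can always be told apart from the unordered one. */
	drv->timeline = dma_fence_context_alloc(1);
}
```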
i915_sw_fence_init(&stub->chain, stub_notify);
dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
+ 0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
obj->base.resv, NULL,
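The stub-fence hunk keeps sharing the sw_fence wait lock; only the context changes. Choosing 0 lines up with the dma-fence core itself, whose stub fence lives on context 0. A sketch, assuming dma_fence_get_stub() as introduced alongside the context-0 reservation:

```c
#include <linux/dma-fence.h>

static void example_use_stub(void)
{
	struct dma_fence *stub = dma_fence_get_stub();

	/* The core's stub fence is initialised on context 0 and is
	 * already signalled; i915's context-0 fences inherit the same
	 * "outside every timeline" meaning. */
	WARN_ON(stub->context != 0);
	dma_fence_put(stub);
}
```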
*/
struct workqueue_struct *userptr_wq;
- u64 unordered_timeline;
-
/** Bit 6 swizzling required for X tiling */
u32 bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
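With the literal 0 in place everywhere, the per-device u64 cookie in struct i915_gem_mm is dead weight and can be dropped. The test it used to support reduces to a comparison against the reserved value, sketched here with a hypothetical helper:

```c
#include <linux/dma-fence.h>

/* Hypothetical helper: the old test against
 * dev_priv->mm.unordered_timeline collapses to a test against the
 * reserved context 0. */
static bool example_fence_is_unordered(const struct dma_fence *fence)
{
	return fence->context == 0;
}
```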
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
intel_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
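This hunk appears to be device (or mock-device) setup; with no cookie left to initialise, the dma_fence_context_alloc(1) call goes too. For reference, a sketch of the allocator's contract (the helper name is hypothetical): contexts are handed out monotonically from 1, optionally in blocks:

```c
#include <linux/dma-fence.h>

/* Hypothetical helper: dma_fence_context_alloc() reserves 'count'
 * consecutive contexts and returns the first, always >= 1. The deleted
 * init code simply asked for one of these per device. */
static u64 example_alloc_timelines(unsigned int count)
{
	return dma_fence_context_alloc(count);
}
```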
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != rq->i915->mm.unordered_timeline &&
+ if (fence->context &&
intel_timeline_sync_is_later(rq->timeline, fence))
continue;
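In the await path, a fence may only be squashed when a seqno comparison is meaningful, i.e. when it belongs to a real timeline. A context-0 fence fails the new check immediately and is always awaited individually. A hedged sketch of the predicate, assuming the 5.4-era gt/intel_timeline.h header, with tl standing in for rq->timeline:

```c
#include <linux/dma-fence.h>
#include "gt/intel_timeline.h"	/* i915-internal; path per the 5.4-era tree */

/* Hypothetical helper mirroring the new squash test: skip the fence
 * only if it has a real timeline (context != 0) that we have already
 * synced to an equal-or-later point. */
static bool example_can_squash(struct intel_timeline *tl,
			       const struct dma_fence *fence)
{
	return fence->context &&
	       intel_timeline_sync_is_later(tl, fence);
}
```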
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != rq->i915->mm.unordered_timeline)
+ if (fence->context)
intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
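Symmetrically, only ordered fences are recorded in the per-timeline sync map; storing a "latest seqno" for context 0 would be meaningless, since unordered fences share that context without sharing any ordering. A condensed sketch of the two checks together (error handling and the actual wait elided; field names follow the 5.4-era tree):

```c
#include <linux/dma-fence.h>
#include "i915_request.h"	/* i915-internal; path per the 5.4-era tree */

/* Hypothetical condensation of the two hunks: context-0 fences are
 * neither squashed nor recorded; everything else flows through the
 * per-timeline sync map as before. */
static void example_await_fence(struct i915_request *rq,
				struct dma_fence *fence)
{
	if (fence->context &&
	    intel_timeline_sync_is_later(rq->timeline, fence))
		return;	/* already ordered after an equal-or-later fence */

	/* ... actually queue the wait on 'fence' here ... */

	if (fence->context)
		intel_timeline_sync_set(rq->timeline, fence);
}
```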