drm/i915: Extract intel_frontbuffer active tracking
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 16 Aug 2019 07:46:35 +0000 (08:46 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 16 Aug 2019 08:51:11 +0000 (09:51 +0100)
Move the active tracking for the frontbuffer operations out of the
i915_gem_object and into its own first class (refcounted) object. In the
process of detangling, we switch from low level request tracking to the
easier i915_active -- with the plan that this avoids any potential
atomic callbacks as the frontbuffer tracking wishes to sleep as it
flushes.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190816074635.26062-1-chris@chris-wilson.co.uk
17 files changed:
Documentation/gpu/i915.rst
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/intel_frontbuffer.c
drivers/gpu/drm/i915/display/intel_frontbuffer.h
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_vma.c

index 0e322688be5c9c71572fbfa861e15b710dd6b925..3415255ad3dcafad0468245ee75075e44db8f653 100644 (file)
@@ -91,9 +91,6 @@ Frontbuffer Tracking
 .. kernel-doc:: drivers/gpu/drm/i915/display/intel_frontbuffer.c
    :internal:
 
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem.c
-   :functions: i915_gem_track_fb
-
 Display FIFO Underrun Reporting
 -------------------------------
 
index 3916c697ffab17882be5918241e6b754011614e3..59e1988dd36275b2b140eafd1ed5d0ab319d674a 100644 (file)
@@ -3049,12 +3049,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);
+       struct drm_i915_gem_object *obj;
+       bool ret = false;
 
        size_aligned -= base_aligned;
 
@@ -3096,7 +3097,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                break;
        default:
                MISSING_CASE(plane_config->tiling);
-               return false;
+               goto out;
        }
 
        mode_cmd.pixel_format = fb->format->format;
@@ -3108,16 +3109,15 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 
        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
-               goto out_unref_obj;
+               goto out;
        }
 
 
        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
-       return true;
-
-out_unref_obj:
+       ret = true;
+out:
        i915_gem_object_put(obj);
-       return false;
+       return ret;
 }
 
 static void
@@ -3174,6 +3174,12 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
        intel_disable_plane(plane, crtc_state);
 }
 
+static struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+       return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
 static void
 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
@@ -3181,7 +3187,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
@@ -3257,8 +3262,7 @@ valid_fb:
                return;
        }
 
-       obj = intel_fb_obj(fb);
-       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+       intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
 
        plane_state->src_x = 0;
        plane_state->src_y = 0;
@@ -3273,14 +3277,14 @@ valid_fb:
        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);
 
-       if (i915_gem_object_is_tiled(obj))
+       if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;
 
        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;
 
        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
-                 &obj->frontbuffer_bits);
+                 &to_intel_frontbuffer(fb)->bits);
 }
 
 static int skl_max_plane_width(const struct drm_framebuffer *fb,
@@ -14132,9 +14136,9 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
 
        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                             new_plane_state, i)
-               i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb),
-                                 intel_fb_obj(new_plane_state->base.fb),
-                                 plane->frontbuffer_bit);
+               intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+                                       to_intel_frontbuffer(new_plane_state->base.fb),
+                                       plane->frontbuffer_bit);
 }
 
 static int intel_atomic_commit(struct drm_device *dev,
@@ -14418,7 +14422,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                return ret;
 
        fb_obj_bump_render_priority(obj);
-       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
 
        if (!new_state->fence) { /* implicit fencing */
                struct dma_fence *fence;
@@ -14681,13 +14685,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
                           struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       int ret;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct drm_framebuffer *old_fb;
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->state);
        struct intel_crtc_state *new_crtc_state;
+       int ret;
 
        /*
         * When crtc is inactive or there is a modeset pending,
@@ -14755,11 +14758,10 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        if (ret)
                goto out_unlock;
 
-       intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
-
-       old_fb = old_plane_state->fb;
-       i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
-                         intel_plane->frontbuffer_bit);
+       intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
+       intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
+                               to_intel_frontbuffer(fb),
+                               intel_plane->frontbuffer_bit);
 
        /* Swap plane state */
        plane->state = new_plane_state;
@@ -15540,15 +15542,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
        drm_framebuffer_cleanup(fb);
-
-       i915_gem_object_lock(obj);
-       WARN_ON(!obj->framebuffer_references--);
-       i915_gem_object_unlock(obj);
-
-       i915_gem_object_put(obj);
+       intel_frontbuffer_put(intel_fb->frontbuffer);
 
        kfree(intel_fb);
 }
@@ -15576,7 +15572,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
        i915_gem_object_flush_if_display(obj);
-       intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+       intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
 
        return 0;
 }
@@ -15598,8 +15594,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
        int ret = -EINVAL;
        int i;
 
+       intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+       if (!intel_fb->frontbuffer)
+               return -ENOMEM;
+
        i915_gem_object_lock(obj);
-       obj->framebuffer_references++;
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);
@@ -15716,9 +15715,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
        return 0;
 
 err:
-       i915_gem_object_lock(obj);
-       obj->framebuffer_references--;
-       i915_gem_object_unlock(obj);
+       intel_frontbuffer_put(intel_fb->frontbuffer);
        return ret;
 }
 
@@ -15736,8 +15733,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
                return ERR_PTR(-ENOENT);
 
        fb = intel_framebuffer_create(obj, &mode_cmd);
-       if (IS_ERR(fb))
-               i915_gem_object_put(obj);
+       i915_gem_object_put(obj);
 
        return fb;
 }
index a88ec9aa9ca05425b509af927fdc4786d974741e..3c1a5f3e1d22ed68f4ab7d863b79fe7736a81432 100644 (file)
@@ -84,6 +84,7 @@ enum intel_broadcast_rgb {
 
 struct intel_framebuffer {
        struct drm_framebuffer base;
+       struct intel_frontbuffer *frontbuffer;
        struct intel_rotation_info rot_info;
 
        /* for each plane in the normal GTT view */
index 2d74c33a504d6dd22262f63a4848d167b11d5d04..d59eee5c5d9c2195bcdc25481c4cc7713ab3a6a6 100644 (file)
 #include "intel_fbdev.h"
 #include "intel_frontbuffer.h"
 
-static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
 {
-       struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
-       unsigned int origin =
-               ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
+       return ifbdev->fb->frontbuffer;
+}
 
-       intel_fb_obj_invalidate(obj, origin);
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+       intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
 }
 
 static int intel_fbdev_set_par(struct fb_info *info)
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = {};
        struct drm_i915_gem_object *obj;
-       int size, ret;
+       int size;
 
        /* we don't do packed 24bpp */
        if (sizes->surface_bpp == 24)
@@ -147,24 +148,16 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
                obj = i915_gem_object_create_shmem(dev_priv, size);
        if (IS_ERR(obj)) {
                DRM_ERROR("failed to allocate framebuffer\n");
-               ret = PTR_ERR(obj);
-               goto err;
+               return PTR_ERR(obj);
        }
 
        fb = intel_framebuffer_create(obj, &mode_cmd);
-       if (IS_ERR(fb)) {
-               ret = PTR_ERR(fb);
-               goto err_obj;
-       }
+       i915_gem_object_put(obj);
+       if (IS_ERR(fb))
+               return PTR_ERR(fb);
 
        ifbdev->fb = to_intel_framebuffer(fb);
-
        return 0;
-
-err_obj:
-       i915_gem_object_put(obj);
-err:
-       return ret;
 }
 
 static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
        const struct i915_ggtt_view view = {
                .type = I915_GGTT_VIEW_NORMAL,
        };
-       struct drm_framebuffer *fb;
        intel_wakeref_t wakeref;
        struct fb_info *info;
        struct i915_vma *vma;
@@ -226,8 +218,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
                goto out_unlock;
        }
 
-       fb = &ifbdev->fb->base;
-       intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+       intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
 
        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
@@ -236,7 +227,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
                goto out_unpin;
        }
 
-       ifbdev->helper.fb = fb;
+       ifbdev->helper.fb = &ifbdev->fb->base;
 
        info->fbops = &intelfb_ops;
 
@@ -263,13 +254,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
-       if (intel_fb_obj(fb)->stolen && !prealloc)
+       if (vma->obj->stolen && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
        DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
-                     fb->width, fb->height, i915_ggtt_offset(vma));
+                     ifbdev->fb->base.width, ifbdev->fb->base.height,
+                     i915_ggtt_offset(vma));
        ifbdev->vma = vma;
        ifbdev->vma_flags = flags;
 
index 9cda88e41d29a031239e1562e95487672a013a22..719379774fa542e0b1426e169b581c1fc85f1552 100644 (file)
  * Many features require us to track changes to the currently active
  * frontbuffer, especially rendering targeted at the frontbuffer.
  *
- * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
- * frontbuffer slots through i915_gem_track_fb(). The function in this file are
- * then called when the contents of the frontbuffer are invalidated, when
- * frontbuffer rendering has stopped again to flush out all the changes and when
- * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * To be able to do so we track frontbuffers using a bitmask for all possible
+ * frontbuffer slots through intel_frontbuffer_track(). The functions in this
+ * file are then called when the contents of the frontbuffer are invalidated,
+ * when frontbuffer rendering has stopped again to flush out all the changes
+ * and when the frontbuffer is exchanged with a flip. Subsystems interested in
  * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
  * into the relevant places and filter for the frontbuffer slots that they are
  * interested in.
 #include "intel_frontbuffer.h"
 #include "intel_psr.h"
 
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                              enum fb_op_origin origin,
-                              unsigned int frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-       if (origin == ORIGIN_CS) {
-               spin_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-               spin_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       might_sleep();
-       intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
-       intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
-       intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
 /**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
-                                   unsigned frontbuffer_bits,
-                                   enum fb_op_origin origin)
+static void frontbuffer_flush(struct drm_i915_private *i915,
+                             unsigned int frontbuffer_bits,
+                             enum fb_op_origin origin)
 {
        /* Delay flushing when rings are still busy.*/
-       spin_lock(&dev_priv->fb_tracking.lock);
-       frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
+       spin_lock(&i915->fb_tracking.lock);
+       frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+       spin_unlock(&i915->fb_tracking.lock);
 
        if (!frontbuffer_bits)
                return;
 
        might_sleep();
-       intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
-       intel_psr_flush(dev_priv, frontbuffer_bits, origin);
-       intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                         enum fb_op_origin origin,
-                         unsigned int frontbuffer_bits)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-       if (origin == ORIGIN_CS) {
-               spin_lock(&dev_priv->fb_tracking.lock);
-               /* Filter out new bits since rendering started. */
-               frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-               dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               spin_unlock(&dev_priv->fb_tracking.lock);
-       }
-
-       if (frontbuffer_bits)
-               intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+       intel_edp_drrs_flush(i915, frontbuffer_bits);
+       intel_psr_flush(i915, frontbuffer_bits, origin);
+       intel_fbc_flush(i915, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
                                    unsigned frontbuffer_bits)
 {
-       spin_lock(&dev_priv->fb_tracking.lock);
-       dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+       spin_lock(&i915->fb_tracking.lock);
+       i915->fb_tracking.flip_bits |= frontbuffer_bits;
        /* Remove stale busy bits due to the old buffer. */
-       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
+       i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       spin_unlock(&i915->fb_tracking.lock);
 }
 
 /**
  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
                                     unsigned frontbuffer_bits)
 {
-       spin_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&i915->fb_tracking.lock);
        /* Mask any cancelled flips. */
-       frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-       dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
+       frontbuffer_bits &= i915->fb_tracking.flip_bits;
+       i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+       spin_unlock(&i915->fb_tracking.lock);
 
        if (frontbuffer_bits)
-               intel_frontbuffer_flush(dev_priv,
-                                       frontbuffer_bits, ORIGIN_FLIP);
+               frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. This is for
@@ -187,13 +149,160 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits)
 {
-       spin_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&i915->fb_tracking.lock);
        /* Remove stale busy bits due to the old buffer. */
-       dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       spin_unlock(&dev_priv->fb_tracking.lock);
+       i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+       spin_unlock(&i915->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+       frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+                          enum fb_op_origin origin,
+                          unsigned int frontbuffer_bits)
+{
+       struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+       if (origin == ORIGIN_CS) {
+               spin_lock(&i915->fb_tracking.lock);
+               i915->fb_tracking.busy_bits |= frontbuffer_bits;
+               i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+               spin_unlock(&i915->fb_tracking.lock);
+       }
+
+       might_sleep();
+       intel_psr_invalidate(i915, frontbuffer_bits, origin);
+       intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+       intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+                     enum fb_op_origin origin,
+                     unsigned int frontbuffer_bits)
+{
+       struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+       if (origin == ORIGIN_CS) {
+               spin_lock(&i915->fb_tracking.lock);
+               /* Filter out new bits since rendering started. */
+               frontbuffer_bits &= i915->fb_tracking.busy_bits;
+               i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+               spin_unlock(&i915->fb_tracking.lock);
+       }
+
+       if (frontbuffer_bits)
+               frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+       struct intel_frontbuffer *front =
+               container_of(ref, typeof(*front), write);
+
+       kref_get(&front->ref);
+       return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+       struct intel_frontbuffer *front =
+               container_of(ref, typeof(*front), write);
+
+       intel_frontbuffer_flush(front, ORIGIN_CS);
+       intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+       __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+{
+       struct intel_frontbuffer *front =
+               container_of(ref, typeof(*front), ref);
+
+       front->obj->frontbuffer = NULL;
+       spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+
+       i915_gem_object_put(front->obj);
+       kfree(front);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct intel_frontbuffer *front;
+
+       spin_lock(&i915->fb_tracking.lock);
+       front = obj->frontbuffer;
+       if (front)
+               kref_get(&front->ref);
+       spin_unlock(&i915->fb_tracking.lock);
+       if (front)
+               return front;
+
+       front = kmalloc(sizeof(*front), GFP_KERNEL);
+       if (!front)
+               return NULL;
+
+       front->obj = obj;
+       kref_init(&front->ref);
+       atomic_set(&front->bits, 0);
+       i915_active_init(i915, &front->write,
+                        frontbuffer_active, frontbuffer_retire);
+
+       spin_lock(&i915->fb_tracking.lock);
+       if (obj->frontbuffer) {
+               kfree(front);
+               front = obj->frontbuffer;
+               kref_get(&front->ref);
+       } else {
+               i915_gem_object_get(obj);
+               obj->frontbuffer = front;
+       }
+       spin_unlock(&i915->fb_tracking.lock);
+
+       return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+       kref_put_lock(&front->ref,
+                     frontbuffer_release,
+                     &to_i915(front->obj->base.dev)->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+                            struct intel_frontbuffer *new,
+                            unsigned int frontbuffer_bits)
+{
+       /*
+        * Control of individual bits within the mask are guarded by
+        * the owning plane->mutex, i.e. we can never see concurrent
+        * manipulation of individual bits. But since the bitfield as a whole
+        * is updated using RMW, we need to use atomics in order to update
+        * the bits.
+        */
+       BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+                    BITS_PER_TYPE(atomic_t));
+
+       if (old) {
+               WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+               atomic_andnot(frontbuffer_bits, &old->bits);
+       }
+
+       if (new) {
+               WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+               atomic_or(frontbuffer_bits, &new->bits);
+       }
 }
index 5727320c8084bedb895d897afb74e19748fc3304..adc64d61a4a5c424275d1b2eb71f0101531624d2 100644 (file)
 #ifndef __INTEL_FRONTBUFFER_H__
 #define __INTEL_FRONTBUFFER_H__
 
-#include "gem/i915_gem_object.h"
+#include <linux/atomic.h>
+#include <linux/kref.h>
+
+#include "i915_active.h"
 
 struct drm_i915_private;
 struct drm_i915_gem_object;
@@ -37,23 +40,30 @@ enum fb_op_origin {
        ORIGIN_DIRTYFB,
 };
 
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+struct intel_frontbuffer {
+       struct kref ref;
+       atomic_t bits;
+       struct i915_active write;
+       struct drm_i915_gem_object *obj;
+};
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
                                    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
                                     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits);
 
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                              enum fb_op_origin origin,
-                              unsigned int frontbuffer_bits);
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                         enum fb_op_origin origin,
-                         unsigned int frontbuffer_bits);
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+                          enum fb_op_origin origin,
+                          unsigned int frontbuffer_bits);
 
 /**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
+ * intel_frontbuffer_invalidate - invalidate frontbuffer object
+ * @front: frontbuffer to invalidate
  * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
@@ -62,37 +72,53 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
-static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                                          enum fb_op_origin origin)
+static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+                                               enum fb_op_origin origin)
 {
        unsigned int frontbuffer_bits;
 
-       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (!front)
+               return false;
+
+       frontbuffer_bits = atomic_read(&front->bits);
        if (!frontbuffer_bits)
                return false;
 
-       __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+       __intel_fb_invalidate(front, origin, frontbuffer_bits);
        return true;
 }
 
+void __intel_fb_flush(struct intel_frontbuffer *front,
+                     enum fb_op_origin origin,
+                     unsigned int frontbuffer_bits);
+
 /**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
+ * intel_frontbuffer_flush - flush frontbuffer object
+ * @front: frontbuffer to flush
  * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again.
  */
-static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                                     enum fb_op_origin origin)
+static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
+                                          enum fb_op_origin origin)
 {
        unsigned int frontbuffer_bits;
 
-       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+       if (!front)
+               return;
+
+       frontbuffer_bits = atomic_read(&front->bits);
        if (!frontbuffer_bits)
                return;
 
-       __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
+       __intel_fb_flush(front, origin, frontbuffer_bits);
 }
 
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+                            struct intel_frontbuffer *new,
+                            unsigned int frontbuffer_bits);
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
 #endif /* __INTEL_FRONTBUFFER_H__ */
index 4f78586ee05ee1c7b76f42dbacfe97d04efefc67..e1248eace0e1690b635cbce18077d48e72f5a73a 100644 (file)
@@ -281,9 +281,9 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
 
        WARN_ON(overlay->old_vma);
 
-       i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
-                         vma ? vma->obj : NULL,
-                         INTEL_FRONTBUFFER_OVERLAY(pipe));
+       intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
+                               vma ? vma->obj->frontbuffer : NULL,
+                               INTEL_FRONTBUFFER_OVERLAY(pipe));
 
        intel_frontbuffer_flip_prepare(overlay->i915,
                                       INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -768,7 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                ret = PTR_ERR(vma);
                goto out_pin_section;
        }
-       intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
+       intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
 
        ret = i915_vma_put_fence(vma);
        if (ret)
index c31684682eaad34b54597804663f7f0900df6f38..77944950d4c9d0add8edcc0cf614378a3865c7ed 100644 (file)
@@ -49,7 +49,7 @@ static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
-       intel_fb_obj_flush(obj, ORIGIN_CPU);
+       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 }
 
 static void i915_clflush_work(struct work_struct *work)
index 2e3ce2a6965395ad2eceff5b981c20a4059badce..a1afc2690e9e109ebf857f7babc8df79a9a8c7e0 100644 (file)
@@ -551,13 +551,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        return 0;
 }
 
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-       return (domain == I915_GEM_DOMAIN_GTT ?
-               obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +654,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
        i915_gem_object_unlock(obj);
 
-       if (write_domain != 0)
-               intel_fb_obj_invalidate(obj,
-                                       fb_write_origin(obj, write_domain));
+       if (write_domain)
+               intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 
 out_unpin:
        i915_gem_object_unpin_pages(obj);
@@ -783,7 +775,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
        }
 
 out:
-       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;
index cfaedb4b70bd1b0d95208f1fe9238aec29888a47..501ab55f12808e9d041c9626eb094adb10185e53 100644 (file)
@@ -101,9 +101,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                up_write(&mm->mmap_sem);
                if (IS_ERR_VALUE(addr))
                        goto err;
-
-               /* This may race, but that's ok, it only gets set */
-               WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
        }
        i915_gem_object_put(obj);
 
@@ -283,7 +280,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
                 * Userspace is now writing through an untracked VMA, abandon
                 * all hope that the hardware is able to track future writes.
                 */
-               obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
                vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
                if (IS_ERR(vma) && !view.type) {
index 3929c3a6b28100456bb728d1bce923d3f35faafa..0807bb5464cf404a071dd6399e0fb64906049801 100644 (file)
@@ -46,16 +46,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
        return kmem_cache_free(global.slab_objects, obj);
 }
 
-static void
-frontbuffer_retire(struct i915_active_request *active,
-                  struct i915_request *request)
-{
-       struct drm_i915_gem_object *obj =
-               container_of(active, typeof(*obj), frontbuffer_write);
-
-       intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
@@ -72,10 +62,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
        obj->ops = ops;
 
-       obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-       i915_active_request_init(&obj->frontbuffer_write,
-                                NULL, frontbuffer_retire);
-
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
@@ -187,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
                GEM_BUG_ON(atomic_read(&obj->bind_count));
                GEM_BUG_ON(obj->userfault_count);
-               GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
                atomic_set(&obj->mm.pages_pin_count, 0);
@@ -230,6 +215,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
+       GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
        /*
         * Before we free the object, make sure any pure RCU-only
         * read-side critical sections are complete, e.g.
@@ -261,13 +248,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                queue_work(i915->wq, &i915->mm.free_work);
 }
 
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-       return (domain == I915_GEM_DOMAIN_GTT ?
-               obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
        return !(obj->cache_level == I915_CACHE_NONE ||
@@ -290,8 +270,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                for_each_ggtt_vma(vma, obj)
                        intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-               intel_fb_obj_flush(obj,
-                                  fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+               intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 
                for_each_ggtt_vma(vma, obj) {
                        if (vma->iomap)
index 3714cf234d640a1c57db9408e2b32324584330fb..abc23e7e13a76bd24eafd62e617d3a7565994097 100644 (file)
@@ -161,7 +161,7 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
-       return READ_ONCE(obj->framebuffer_references);
+       return READ_ONCE(obj->frontbuffer);
 }
 
 static inline unsigned int
index d474c6ac4100f19064483850f8586750702f1a66..ede0eb4218a81b9f1c67390afe7b2433afdf7a49 100644 (file)
@@ -13,6 +13,7 @@
 #include "i915_selftest.h"
 
 struct drm_i915_gem_object;
+struct intel_frontbuffer;
 
 /*
  * struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -141,9 +142,7 @@ struct drm_i915_gem_object {
         */
        u16 write_domain;
 
-       atomic_t frontbuffer_bits;
-       unsigned int frontbuffer_ggtt_origin; /* write once */
-       struct i915_active_request frontbuffer_write;
+       struct intel_frontbuffer *frontbuffer;
 
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
@@ -224,9 +223,6 @@ struct drm_i915_gem_object {
                bool quirked:1;
        } mm;
 
-       /** References from framebuffers, locks out tiling changes. */
-       unsigned int framebuffer_references;
-
        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;
 
index ef4f84e060ee53f7f3326ba8b3ae8357f019c6d7..e7ce739fe5455ffe5006ae4a0fb99665e27469fe 100644 (file)
@@ -138,7 +138,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_engine_cs *engine;
        struct i915_vma *vma;
-       unsigned int frontbuffer_bits;
        int pin_count = 0;
 
        seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
@@ -228,10 +227,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
        engine = i915_gem_object_last_write_engine(obj);
        if (engine)
                seq_printf(m, " (%s)", engine->name);
-
-       frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
-       if (frontbuffer_bits)
-               seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
 struct file_stats {
index 50effdccc76c04da4bd1e2d92366c99990600ff2..b6beae12cd2190c28c45491c4cd5beb73f9b5432 100644 (file)
@@ -2355,10 +2355,6 @@ int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      u32 handle, u64 *offset);
 int i915_gem_mmap_gtt_version(void);
 
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-                      struct drm_i915_gem_object *new,
-                      unsigned frontbuffer_bits);
-
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
 static inline u32 i915_reset_count(struct i915_gpu_error *error)
index 29be25a7aaded8603ee19effba75b3187603c3ee..71ee4c7102520d03c958c2f7e47a215ce39ed1ec 100644 (file)
@@ -139,17 +139,19 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
 
-       /* We manually control the domain here and pretend that it
+       /*
+        * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;
 
        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-       intel_fb_obj_flush(obj, ORIGIN_CPU);
+       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
        return 0;
 }
 
@@ -594,7 +596,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                goto out_unpin;
        }
 
-       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -636,7 +638,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                user_data += page_length;
                offset += page_length;
        }
-       intel_fb_obj_flush(obj, ORIGIN_CPU);
+       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
@@ -729,7 +731,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                offset = 0;
        }
 
-       intel_fb_obj_flush(obj, ORIGIN_CPU);
+       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);
 
        return ret;
@@ -1763,39 +1765,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
        return ret;
 }
 
-/**
- * i915_gem_track_fb - update frontbuffer tracking
- * @old: current GEM buffer for the frontbuffer slots
- * @new: new GEM buffer for the frontbuffer slots
- * @frontbuffer_bits: bitmask of frontbuffer slots
- *
- * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
- * from @old and setting them in @new. Both @old and @new can be NULL.
- */
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-                      struct drm_i915_gem_object *new,
-                      unsigned frontbuffer_bits)
-{
-       /* Control of individual bits within the mask are guarded by
-        * the owning plane->mutex, i.e. we can never see concurrent
-        * manipulation of individual bits. But since the bitfield as a whole
-        * is updated using RMW, we need to use atomics in order to update
-        * the bits.
-        */
-       BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
-                    BITS_PER_TYPE(atomic_t));
-
-       if (old) {
-               WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
-               atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
-       }
-
-       if (new) {
-               WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
-               atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
-       }
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_gem_device.c"
 #include "selftests/i915_gem.c"
index 8be1bbef40e525e7e897050af13aeb6e40b2a873..d38ef2ef3ce474a67afa121fb8d3e30154a64507 100644 (file)
@@ -908,8 +908,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                return err;
 
        if (flags & EXEC_OBJECT_WRITE) {
-               if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-                       __i915_active_request_set(&obj->frontbuffer_write, rq);
+               if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
+                       i915_active_ref(&obj->frontbuffer->write,
+                                       rq->fence.context,
+                                       rq);
 
                reservation_object_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;