struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
+ unsigned int frontbuffer_bits;
int pin_count = 0;
enum intel_engine_id id;
if (engine)
seq_printf(m, " (%s)", engine->name);
- if (obj->frontbuffer_bits)
- seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
+ frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (frontbuffer_bits)
+ seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
*/
#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
- (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
unsigned int cache_level:3;
unsigned int cache_dirty:1;
- unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ atomic_t frontbuffer_bits;
unsigned int has_wc_mmap;
/** Count of VMA actually bound by this object */
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
- WARN_ON(obj->frontbuffer_bits);
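+ /* The object must no longer be tracked as a frontbuffer by this point. */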
+ WARN_ON(atomic_read(&obj->frontbuffer_bits));
if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
{
+ /* Control of individual bits within the mask is guarded by
+ * the owning plane->mutex, i.e. we can never see concurrent
+ * manipulation of individual bits. But since the bitfield as a whole
+ * is updated using RMW, we need to use atomics in order to update
+ * the bits.
+ */
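+ /* e.g. with I915_MAX_PIPES == 3: 8 bits * 3 pipes = 24 bits, fitting in a 32-bit atomic_t. */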
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+ sizeof(atomic_t) * BITS_PER_BYTE);
+
if (old) {
- WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
- WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
- old->frontbuffer_bits &= ~frontbuffer_bits;
+ WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
+ atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
}
if (new) {
- WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
- WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
- new->frontbuffer_bits |= frontbuffer_bits;
+ WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
+ atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
}
}
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
- obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
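+ /* Set the primary plane's frontbuffer bit on the inherited fb object atomically. */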
+ atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+ &obj->frontbuffer_bits);
}
static void i9xx_update_primary_plane(struct drm_plane *primary,
{
struct drm_plane_state *old_plane_state;
struct drm_plane *plane;
- struct drm_i915_gem_object *obj, *old_obj;
- struct intel_plane *intel_plane;
int i;
- mutex_lock(&state->dev->struct_mutex);
- for_each_plane_in_state(state, plane, old_plane_state, i) {
- obj = intel_fb_obj(plane->state->fb);
- old_obj = intel_fb_obj(old_plane_state->fb);
- intel_plane = to_intel_plane(plane);
-
- i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
- }
- mutex_unlock(&state->dev->struct_mutex);
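+ /* i915_gem_track_fb() now updates the bits atomically; no struct_mutex needed. */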
+ for_each_plane_in_state(state, plane, old_plane_state, i)
+ i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+ intel_fb_obj(plane->state->fb),
+ to_intel_plane(plane)->frontbuffer_bit);
}
/**
#include "i915_drv.h"
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
if (origin == ORIGIN_CS) {
spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
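+ /* Mark the bits busy; they are cleared again when the flush retires. */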
+ dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
}
- intel_psr_invalidate(dev, obj->frontbuffer_bits);
- intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
- intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+ intel_psr_invalidate(dev, frontbuffer_bits);
+ intel_edp_drrs_invalidate(dev, frontbuffer_bits);
+ intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
}
/**
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire,
- enum fb_op_origin origin)
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned frontbuffer_bits;
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- frontbuffer_bits = obj->frontbuffer_bits;
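+ /* The caller passes in an atomic snapshot of obj->frontbuffer_bits. */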
if (retire) {
spin_lock(&dev_priv->fb_tracking.lock);
unsigned frontbuffer_bits);
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin);
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire,
- enum fb_op_origin origin);
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
- if (!obj->frontbuffer_bits)
+ unsigned int frontbuffer_bits;
+
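+ /* Snapshot the bits once and hand the same value to the out-of-line helper. */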
+ frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!frontbuffer_bits)
return;
- __intel_fb_obj_invalidate(obj, origin);
+ __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
}
/**
bool retire,
enum fb_op_origin origin)
{
- if (!obj->frontbuffer_bits)
+ unsigned int frontbuffer_bits;
+
+ frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!frontbuffer_bits)
return;
- __intel_fb_obj_flush(obj, retire, origin);
+ __intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
}
#endif /* __INTEL_FRONTBUFFER_H__ */