#include <linux/swap.h>
#include <linux/pci.h>
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
- bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
- uint64_t offset,
- uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+ uint64_t offset,
+ uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
return ret;
}
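+/*
+ * i915_gem_flush_ring - emit a flush into the target ring for the given
+ * invalidate/flush domains.
+ *
+ * Emitting the flush writes commands into the ringbuffer and so may fail;
+ * on failure the error is returned and the flushing list is left untouched.
+ */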
-void
+int
i915_gem_flush_ring(struct drm_device *dev,
struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
- if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
- i915_gem_process_flushing_list(dev, flush_domains, ring);
+ int ret;
+
+ ret = ring->flush(ring, invalidate_domains, flush_domains);
+ if (ret)
+ return ret;
+
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+ return 0;
}
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
+ int ret;
+
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
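+	/* Flush any outstanding GPU writes before waiting; if the flush
+	 * itself fails there is nothing coherent to wait for, so report
+	 * the error to the caller. */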
- if (!list_empty(&ring->gpu_write_list))
- i915_gem_flush_ring(dev, ring,
+ if (!list_empty(&ring->gpu_write_list)) {
+ ret = i915_gem_flush_ring(dev, ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+ }
+
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
int ret;
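+	/* If the GPU is still writing to the object through the fence,
+	 * flush those writes first; a failed flush aborts the fence update. */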
if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev,
- obj->last_fenced_ring,
- 0, obj->base.write_domain);
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->base.dev,
+ obj->last_fenced_ring,
+ 0, obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
obj->fenced_gpu_access = false;
}
return ret;
} else if (obj->tiling_changed) {
if (obj->fenced_gpu_access) {
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev, obj->ring,
- 0, obj->base.write_domain);
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
obj->fenced_gpu_access = false;
}
}
-/** Flushes any GPU write domain for the object if it's dirty. */
+/**
+ * Flushes any GPU write domain for the object if it's dirty.
+ *
+ * Returns 0 on success, or the error from i915_gem_flush_ring() if the
+ * flush could not be queued.
+ */
-static void
+static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return;
+ return 0;
/* Queue the GPU write cache flushing we need. */
- i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
- BUG_ON(obj->base.write_domain);
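+	/* No BUG_ON(write_domain) after the flush any more: the flush may
+	 * now fail, in which case the write domain legitimately remains set. */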
+ return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
if (obj->gtt_space == NULL)
return -EINVAL;
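+	/* The GPU write flush can now fail; propagate the error before
+	 * attempting the domain change. */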
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
if (obj->pending_gpu_write || write) {
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
if (obj->gtt_space == NULL)
return -EINVAL;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
-	/* Currently, we are always called from an non-interruptible context. */
+	/* Currently, we are always called from a non-interruptible context. */
if (pipelined != obj->ring) {
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
bool interruptible)
{
+ int ret;
+
if (!obj->active)
return 0;
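+	/* An active object may still have GPU writes outstanding; those must
+	 * be flushed successfully before waiting for rendering to complete. */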
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev, obj->ring,
- 0, obj->base.write_domain);
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+ if (ret)
+ return ret;
+ }
return i915_gem_object_wait_rendering(obj, interruptible);
}
uint32_t old_write_domain, old_read_domains;
int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- i915_gem_flush_ring(dev, obj->ring,
- 0, obj->base.write_domain);
+ ret = i915_gem_flush_ring(dev, obj->ring,
+ 0, obj->base.write_domain);
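+		/* NB: a failure here is captured in ret for the surrounding
+		 * function to report; the flush remains an opportunistic
+		 * hint (see the comment above). */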
} else if (obj->ring->outstanding_lazy_request ==
obj->last_rendering_seqno) {
struct drm_i915_gem_request *request;