enum vmw_resource_relocation_type rel_type:3;
};
-/**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @switching_backup: The command stream provides a new backup buffer for a
- * resource.
- * @no_buffer_needed: This means @switching_backup is true on first buffer
- * reference. So resource reservation does not need to allocate a backup
- * buffer for the resource.
+/**
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
*/
-struct vmw_resource_val_node {
+struct vmw_ctx_validation_info {
struct list_head head;
- struct drm_hash_item hash;
- struct vmw_resource *res;
- struct vmw_buffer_object *new_backup;
- struct vmw_ctx_binding_state *staged_bindings;
- unsigned long new_backup_offset;
- u32 first_usage : 1;
- u32 switching_backup : 1;
- u32 no_buffer_needed : 1;
+ struct vmw_resource *ctx;
+ struct vmw_ctx_binding_state *cur;
+ struct vmw_ctx_binding_state *staged;
};
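The struct above splits a context's binding state in two: @cur is the persistent state owned by the context, @staged collects the changes made by the current command buffer. Below is a minimal userspace sketch of that commit-on-success model; all names are hypothetical, not taken from the driver.

/*
 * Illustrative model, not driver code: a batch stages binding changes
 * separately, and the persistent table only changes on commit.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_SLOTS 4

struct binding_state {
	int res_id[NUM_SLOTS];	/* bound resource id per slot, -1 = none */
	bool dirty[NUM_SLOTS];	/* staged state only: slot touched by batch */
};

/* Fold staged changes into the persistent state; skipped on backoff. */
static void binding_state_commit(struct binding_state *cur,
				 struct binding_state *staged)
{
	for (int i = 0; i < NUM_SLOTS; i++)
		if (staged->dirty[i])
			cur->res_id[i] = staged->res_id[i];
	memset(staged, 0, sizeof(*staged));
}

int main(void)
{
	struct binding_state cur = { .res_id = { 7, -1, -1, -1 } };
	struct binding_state staged = { 0 };

	staged.res_id[1] = 42;	/* the batch binds resource 42 to slot 1 */
	staged.dirty[1] = true;

	binding_state_commit(&cur, &staged);
	printf("slot 1 -> %d\n", cur.res_id[1]);	/* slot 1 -> 42 */
	return 0;
}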
/**
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p);
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_buffer_object *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
}
/**
- * vmw_resources_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @sw_context: pointer to the software context
- * @backoff: Whether command submission failed.
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state
+ * changes should be ignored
*/
-static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
- bool backoff)
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+ bool backoff)
{
- struct vmw_resource_val_node *val;
- struct list_head *list = &sw_context->resource_list;
+ struct vmw_ctx_validation_info *entry, *next;
- if (sw_context->dx_query_mob && !backoff)
- vmw_context_bind_dx_query(sw_context->dx_query_ctx,
- sw_context->dx_query_mob);
+ list_for_each_entry_safe(entry, next, &sw_context->ctx_list, head) {
+ list_del(&entry->head);
- list_for_each_entry(val, list, head) {
- struct vmw_resource *res = val->res;
- bool switch_backup =
- (backoff) ? false : val->switching_backup;
-
- /*
- * Transfer staged context bindings to the
- * persistent context binding tracker.
- */
- if (unlikely(val->staged_bindings)) {
- if (!backoff) {
- vmw_binding_state_commit
- (vmw_context_binding_state(val->res),
- val->staged_bindings);
- }
-
- if (val->staged_bindings != sw_context->staged_bindings)
- vmw_binding_state_free(val->staged_bindings);
- else
- sw_context->staged_bindings_inuse = false;
- val->staged_bindings = NULL;
- }
- vmw_resource_unreserve(res, switch_backup, val->new_backup,
- val->new_backup_offset);
- vmw_bo_unreference(&val->new_backup);
+ if (!backoff)
+ vmw_binding_state_commit(entry->cur, entry->staged);
+ if (entry->staged != sw_context->staged_bindings)
+ vmw_binding_state_free(entry->staged);
+ else
+ sw_context->staged_bindings_inuse = false;
}
}
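Note the ownership rule in the loop above: a staged state that is the software context's preallocated one is only marked not-in-use, while anything else was allocated for this batch and must be freed. A self-contained sketch of that reuse-or-allocate pattern, assuming simplified stand-in types:

/* Stand-in types; this sketches the reuse-or-allocate rule only. */
#include <stdbool.h>
#include <stdlib.h>

struct staged_state { int dummy; };

struct sw_ctx {
	struct staged_state *prealloc;	/* one reusable state per sw context */
	bool prealloc_inuse;
};

static struct staged_state *staged_get(struct sw_ctx *sw)
{
	if (!sw->prealloc_inuse) {
		sw->prealloc_inuse = true;
		return sw->prealloc;	/* first context: borrow it */
	}
	return calloc(1, sizeof(struct staged_state));	/* later ones: own it */
}

static void staged_put(struct sw_ctx *sw, struct staged_state *st)
{
	if (st == sw->prealloc)
		sw->prealloc_inuse = false;	/* borrowed: just clear the flag */
	else
		free(st);			/* owned: really free */
}

int main(void)
{
	struct staged_state pre = { 0 };
	struct sw_ctx sw = { .prealloc = &pre };
	struct staged_state *a = staged_get(&sw);	/* == &pre */
	struct staged_state *b = staged_get(&sw);	/* freshly allocated */

	staged_put(&sw, b);
	staged_put(&sw, a);
	return 0;
}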
+/**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+ if (sw_context->dx_query_mob)
+ vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+ sw_context->dx_query_mob);
+}
+
/**
* vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
* added to the validate list.
*
- * @dev_priv: Pointer to the device private:
+ * @dev_priv: Pointer to the device private
- * @sw_context: The validation context:
- * @node: The validation node holding this context.
+ * @sw_context: The command submission context
+ * @res: Pointer to the context resource
+ * @node: The validation node holding the context resource metadata
*/
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource_val_node *node)
+ struct vmw_resource *res,
+ struct vmw_ctx_validation_info *node)
{
int ret;
- ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
}
if (sw_context->staged_bindings_inuse) {
- node->staged_bindings = vmw_binding_state_alloc(dev_priv);
- if (IS_ERR(node->staged_bindings)) {
+ node->staged = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(node->staged)) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
- ret = PTR_ERR(node->staged_bindings);
- node->staged_bindings = NULL;
+ ret = PTR_ERR(node->staged);
+ node->staged = NULL;
goto out_err;
}
} else {
- node->staged_bindings = sw_context->staged_bindings;
+ node->staged = sw_context->staged_bindings;
sw_context->staged_bindings_inuse = true;
}
+ node->ctx = res;
+ node->cur = vmw_context_binding_state(res);
+ list_add_tail(&node->head, &sw_context->ctx_list);
+
return 0;
out_err:
return ret;
* struct vmw_resource_val_node, if non-NULL on entry.
*/
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_node)
+ struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_resource_val_node *node;
- struct drm_hash_item *hash;
int ret;
+ enum vmw_res_type res_type = vmw_res_type(res);
+ struct vmw_res_cache_entry *rcache;
+ struct vmw_ctx_validation_info *ctx_info;
+ bool first_usage;
+ size_t priv_size;
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
- &hash) == 0)) {
- node = container_of(hash, struct vmw_resource_val_node, hash);
- node->first_usage = false;
- if (unlikely(p_node != NULL))
- *p_node = node;
- return 0;
- }
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
- return -ENOMEM;
- }
+ /*
+ * If the resource is a context, set up structures to track
+ * context bindings.
+ */
+ priv_size = (res_type == vmw_res_dx_context ||
+ (res_type == vmw_res_context && dev_priv->has_mob)) ?
+ sizeof(*ctx_info) : 0;
- node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a resource validation "
- "entry.\n");
- kfree(node);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+ (void **)&ctx_info, &first_usage);
+ if (ret)
return ret;
- }
- node->res = vmw_resource_reference(res);
- node->first_usage = true;
- if (unlikely(p_node != NULL))
- *p_node = node;
- if (!dev_priv->has_mob) {
- list_add_tail(&node->head, &sw_context->resource_list);
- return 0;
+ if (priv_size && first_usage) {
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+ ctx_info);
+ if (ret)
+ return ret;
}
- switch (vmw_res_type(res)) {
- case vmw_res_context:
- case vmw_res_dx_context:
- list_add(&node->head, &sw_context->ctx_resource_list);
- ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
- break;
- case vmw_res_cotable:
- list_add_tail(&node->head, &sw_context->ctx_resource_list);
- break;
- default:
- list_add_tail(&node->head, &sw_context->resource_list);
- break;
- }
+ /* Cache info about the last added resource */
+ rcache = &sw_context->res_cache[res_type];
+ rcache->res = res;
+ rcache->private = ctx_info;
+ rcache->valid = 1;
+ rcache->valid_handle = 0;
return ret;
}
* First add the resource the view is pointing to, otherwise
* it may be swapped out when the view is validated.
*/
- ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+ ret = vmw_resource_val_add(sw_context, vmw_view_srf(view));
if (ret)
return ret;
- return vmw_resource_val_add(sw_context, view, NULL);
+ return vmw_resource_val_add(sw_context, view);
}
/**
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
enum vmw_view_type view_type, u32 id)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *view;
int ret;
if (IS_ERR(res))
continue;
- ret = vmw_resource_val_add(sw_context, res, NULL);
+ ret = vmw_resource_val_add(sw_context, res);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_resource_val_add(sw_context, entry->res,
- NULL);
+ ret = vmw_resource_val_add(sw_context, entry->res);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
- ret = vmw_bo_to_validate_list(sw_context,
- dx_query_mob,
- true, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ dx_query_mob, true, false);
}
mutex_unlock(&dev_priv->binding_mutex);
return 0;
}
-/**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_buffer_object *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node)
-{
- uint32_t val_node;
- struct vmw_validate_buffer *vval_buf;
- struct ttm_validate_buffer *val_buf;
- struct drm_hash_item *hash;
- int ret;
-
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
- &hash) == 0)) {
- vval_buf = container_of(hash, struct vmw_validate_buffer,
- hash);
- if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- val_buf = &vval_buf->base;
- val_node = vval_buf - sw_context->val_bufs;
- } else {
- val_node = sw_context->cur_val_buf;
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission "
- "exceeded.\n");
- return -EINVAL;
- }
- vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a buffer validation "
- "entry.\n");
- return ret;
- }
- ++sw_context->cur_val_buf;
- val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(&vbo->base);
- val_buf->shared = false;
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- vval_buf->validate_as_mob = validate_as_mob;
- }
-
- if (p_val_node)
- *p_val_node = val_node;
-
- return 0;
-}
-
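For reference, the helper removed here de-duplicated buffers on the validate list via the res_ht hash table and rejected a second add with a mismatched MOB flag. A compact userspace sketch of that find-or-insert-with-consistency-check pattern, with a linear scan standing in for the hash table (all names hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VAL 8

struct val_buf {
	const void *bo;		/* buffer identity */
	bool as_mob;		/* how it must be validated */
};

static struct val_buf list[MAX_VAL];
static int num_val;

static int add_to_validate_list(const void *bo, bool as_mob)
{
	for (int i = 0; i < num_val; i++) {
		if (list[i].bo != bo)
			continue;
		if (list[i].as_mob != as_mob)
			return -EINVAL;	/* inconsistent buffer usage */
		return 0;		/* already on the list */
	}
	if (num_val >= MAX_VAL)
		return -EINVAL;		/* per-submission limit reached */
	list[num_val++] = (struct val_buf){ bo, as_mob };
	return 0;
}

int main(void)
{
	int dummy;

	printf("%d\n", add_to_validate_list(&dummy, true));	/* 0 */
	printf("%d\n", add_to_validate_list(&dummy, true));	/* 0, duplicate */
	printf("%d\n", add_to_validate_list(&dummy, false));	/* -EINVAL */
	return 0;
}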
/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource_val_node *val;
- int ret = 0;
-
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
-
- ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
- if (unlikely(ret != 0))
- return ret;
-
- if (res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
-
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
+ int ret;
- if (unlikely(ret != 0))
- return ret;
- }
- }
+ ret = vmw_validation_res_reserve(sw_context->ctx, true);
+ if (ret)
+ return ret;
if (sw_context->dx_query_mob) {
struct vmw_buffer_object *expected_dx_query_mob;
return ret;
}
-/**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
-static int vmw_resources_validate(struct vmw_sw_context *sw_context)
-{
- struct vmw_resource_val_node *val;
- int ret;
-
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
- struct vmw_buffer_object *backup = res->backup;
-
- ret = vmw_resource_validate(res, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to validate resource.\n");
- return ret;
- }
-
- /* Check if the resource switched backup buffer */
- if (backup && res->backup && (backup != res->backup)) {
- struct vmw_buffer_object *vbo = res->backup;
-
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
- if (ret) {
- ttm_bo_unreserve(&vbo->base);
- return ret;
- }
- }
- }
- return 0;
-}
-
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
- *
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
+ *
+ * Return: Zero on success, negative error code on error
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t *id_loc,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_val)
+ struct vmw_resource *res)
{
int ret;
- struct vmw_resource_val_node *node;
- *p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
vmw_ptr_diff(sw_context->buf_start,
if (unlikely(ret != 0))
return ret;
- ret = vmw_resource_val_add(sw_context, res, &node);
+ ret = vmw_resource_val_add(sw_context, res);
if (unlikely(ret != 0))
return ret;
- if (p_val)
- *p_val = node;
-
return 0;
}
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
+ struct vmw_resource **p_res)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
struct vmw_resource *res;
- struct vmw_resource_val_node *node;
int ret;
+ if (p_res)
+ *p_res = NULL;
+
if (*id_loc == SVGA3D_INVALID_ID) {
- if (p_val)
- *p_val = NULL;
if (res_type == vmw_res_context) {
DRM_ERROR("Illegal context invalid id.\n");
return -EINVAL;
* resource
*/
- if (likely(rcache->valid && *id_loc == rcache->handle)) {
- const struct vmw_resource *res = rcache->res;
+ if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+ struct vmw_resource *res = rcache->res;
- rcache->node->first_usage = false;
- if (p_val)
- *p_val = rcache->node;
+ if (p_res)
+ *p_res = res;
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
return ret;
}
- rcache->valid = true;
- rcache->res = res;
- rcache->handle = *id_loc;
-
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
- res, &node);
+ res);
if (unlikely(ret != 0))
goto out_no_reloc;
- rcache->node = node;
- if (p_val)
- *p_val = node;
+ if (p_res)
+ *p_res = res;
+
+ if (rcache->valid && rcache->res == res) {
+ rcache->valid_handle = true;
+ rcache->handle = *id_loc;
+ }
+
vmw_resource_unreference(&res);
return 0;
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
- struct vmw_resource_val_node *val;
+ struct vmw_ctx_validation_info *val;
int ret;
- list_for_each_entry(val, &sw_context->resource_list, head) {
- if (unlikely(!val->staged_bindings))
- break;
-
- ret = vmw_binding_rebind_all
- (vmw_context_binding_state(val->res));
+ list_for_each_entry(val, &sw_context->ctx_list, head) {
+ ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
- ret = vmw_rebind_all_dx_query(val->res);
+ ret = vmw_rebind_all_dx_query(val->ctx);
if (ret != 0)
return ret;
}
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_cmdbuf_res_manager *man;
u32 i;
int ret;
return ret;
}
}
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.res = view;
binding.bi.bt = binding_type;
binding.shader_slot = shader_slot;
binding.slot = first_slot + i;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
shader_slot, binding.slot);
if (view)
vmw_resource_unreference(&view);
user_context_converter, &cmd->cid, NULL);
}
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command
+ * submission context's resource cache, and hence must be the last resource
+ * of that type to have been processed by the validation code.
+ *
+ * Return: A pointer to the private metadata of the resource, or NULL
+ * if it wasn't found.
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
+{
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[vmw_res_type(res)];
+
+ if (rcache->valid && rcache->res == res)
+ return rcache->private;
+
+ WARN_ON_ONCE(true);
+ return NULL;
+}
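Because res_cache holds exactly one entry per resource type, the lookup above is only valid for the most recently added resource of that type. A small sketch that makes the contract, including the eviction case, concrete (illustrative names only):

#include <stdio.h>

enum res_type { RES_CONTEXT, RES_SURFACE, RES_NUM };

struct res { enum res_type type; int id; };

struct cache_entry {
	const struct res *res;
	void *private;
	int valid;
};

static struct cache_entry res_cache[RES_NUM];

static void res_add(const struct res *r, void *priv)
{
	struct cache_entry *rc = &res_cache[r->type];

	rc->res = r;		/* overwrites the previous entry of this type */
	rc->private = priv;
	rc->valid = 1;
}

static void *info_from_res(const struct res *r)
{
	struct cache_entry *rc = &res_cache[r->type];

	return (rc->valid && rc->res == r) ? rc->private : NULL;
}

int main(void)
{
	struct res ctx = { RES_CONTEXT, 1 }, ctx2 = { RES_CONTEXT, 2 };
	int meta = 123;

	res_add(&ctx, &meta);
	printf("%p\n", info_from_res(&ctx));	/* hit: &meta */
	res_add(&ctx2, NULL);			/* evicts ctx from the slot */
	printf("%p\n", info_from_res(&ctx));	/* miss: NULL */
	return 0;
}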
+
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.target.sid, &res_node);
+ &cmd->body.target.sid, &res);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_view binding;
+ struct vmw_ctx_validation_info *node;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ node = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!node)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_rt;
binding.slot = cmd->body.type;
- vmw_binding_add(ctx_node->staged_bindings,
- &binding.bi, 0, binding.slot);
+ vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
}
return 0;
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
- ret = vmw_bo_to_validate_list(sw_context,
- sw_context->cur_query_bo,
- dev_priv->has_mob, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ sw_context->cur_query_bo,
+ dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ dev_priv->dummy_query_bo,
+ dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->mob_loc = id;
reloc->location = NULL;
+ reloc->vbo = vmw_bo;
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
if (unlikely(ret != 0))
goto out_no_reloc;
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
+ reloc->vbo = vmw_bo;
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
if (unlikely(ret != 0))
goto out_no_reloc;
} *cmd;
int ret;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res;
cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
- cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+ cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
vmw_resource_unreference(&cotable_res);
return ret;
sw_context->dx_query_mob = vmw_bo;
- sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+ sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
vmw_bo_unreference(&vmw_bo);
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_tex_state_cmd,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->state.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cur_state->value, &res_node);
+ &cur_state->value, &res);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_tex binding;
+ struct vmw_ctx_validation_info *node;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ node = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!node)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_tex;
binding.texture_stage = cur_state->stage;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
- 0, binding.texture_stage);
+ vmw_binding_add(node->staged, &binding.bi, 0,
+ binding.texture_stage);
}
}
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource_val_node *val_node,
+ struct vmw_resource *res,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_buffer_object *dma_buf;
+ struct vmw_buffer_object *vbo;
+ void *info;
int ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ info = vmw_execbuf_info_from_res(sw_context, res);
+ if (!info)
+ return -EINVAL;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
if (ret)
return ret;
- val_node->switching_backup = true;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
-
- vmw_bo_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
+ vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+ backup_offset);
+ vmw_bo_unreference(&vbo);
return 0;
}
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_resource_val_node *val_node;
+ struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &val_node);
+ converter, res_id, &res);
if (ret)
return ret;
- return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
buf_id, backup_offset);
}
} *cmd;
int ret;
size_t size;
- struct vmw_resource_val_node *val;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &val);
+ &ctx);
if (unlikely(ret != 0))
return ret;
size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(val->res),
+ vmw_context_res_man(ctx),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
&sw_context->staged_cmd_res);
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
- struct vmw_resource_val_node *val;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &val);
+ &ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_shader_remove(vmw_context_res_man(val->res),
+ ret = vmw_shader_remove(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type,
&sw_context->staged_cmd_res);
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
- struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo_shader binding;
- struct vmw_resource *res = NULL;
+ struct vmw_resource *ctx, *res = NULL;
+ struct vmw_ctx_validation_info *ctx_info;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type);
if (!IS_ERR(res)) {
+ struct vmw_resource *tmp_res = res;
+
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- &cmd->body.shid, res,
- &res_node);
- vmw_resource_unreference(&res);
+ &cmd->body.shid, res);
+ vmw_resource_unreference(&tmp_res);
if (unlikely(ret != 0))
return ret;
}
}
- if (!res_node) {
+ if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader,
user_shader_converter,
- &cmd->body.shid, &res_node);
+ &cmd->body.shid, &res);
if (unlikely(ret != 0))
return ret;
}
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!ctx_info)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_info->staged, &binding.bi,
binding.shader_slot, 0);
return 0;
}
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- struct vmw_resource_val_node *res_node = NULL;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res = NULL;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_cb binding;
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.sid, &res_node);
+ &cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
binding.offset = cmd->body.offsetInBytes;
return -EINVAL;
}
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, binding.slot);
return 0;
SVGA3dCmdDXSetShader body;
} *cmd;
struct vmw_resource *res = NULL;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, NULL);
+ ret = vmw_resource_val_add(sw_context, res);
if (ret)
goto out_unref;
}
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, 0);
out_unref:
if (res)
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_vb binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body;
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->buf[i].sid, &res_node);
+ &cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.bt = vmw_ctx_binding_vb;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.res = res;
binding.offset = cmd->buf[i].offset;
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_ib binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.sid, &res_node);
+ &cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_ib;
binding.offset = cmd->body.offset;
binding.format = cmd->body.format;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
return 0;
}
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
- struct vmw_resource_val_node *srf_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->sid, &srf_node);
+ &cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
- res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
return vmw_view_add(sw_context->man,
- ctx_node->res,
- srf_node->res,
+ ctx_node->ctx,
+ srf,
view_type,
cmd->defined_id,
header,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_so binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body;
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->targets[i].sid, &res_node);
+ &cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
-		binding.bi.bt = vmw_ctx_binding_so,
+		binding.bi.bt = vmw_ctx_binding_so;
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
}
so_type = vmw_so_cmd_to_type(header->id);
- res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
vmw_resource_unreference(&res);
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
return -EINVAL;
}
- res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
vmw_resource_unreference(&res);
if (ret)
return ret;
- return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+ return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
cmd->body.shaderId, cmd->body.type,
&sw_context->staged_cmd_res);
}
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyShader body;
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter,
- &cmd->body.cid, &ctx_node);
+ &cmd->body.cid, &ctx);
if (ret)
return ret;
} else {
- ctx_node = sw_context->dx_ctx_node;
- if (!ctx_node) {
+ if (!sw_context->dx_ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
+ ctx = sw_context->dx_ctx_node->ctx;
}
- res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, 0);
if (IS_ERR(res)) {
DRM_ERROR("Could not find shader to bind.\n");
return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, &res_node);
+ ret = vmw_resource_val_add(sw_context, res);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
goto out_unref;
}
- ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+ ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
&cmd->body.mobid,
cmd->body.offsetInBytes);
out_unref:
{
uint32_t i;
struct vmw_relocation *reloc;
- struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index].base;
- bo = validate->bo;
+ bo = &reloc->vbo->base;
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
vmw_free_relocations(sw_context);
}
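The loop above patches buffer-relative offsets in the command stream once each buffer's final placement is known. A toy model of that relocation pass, built on hypothetical structures:

#include <stdint.h>
#include <stdio.h>

struct buf { uint32_t final_offset; };	/* known only after validation */

struct reloc {
	uint32_t *location;	/* word in the command stream to patch */
	struct buf *buf;	/* buffer the word refers to */
};

static void apply_relocs(struct reloc *r, int n)
{
	for (int i = 0; i < n; i++)
		*r[i].location += r[i].buf->final_offset;
}

int main(void)
{
	uint32_t cmds[] = { 0x10, 0x20 };	/* offsets within buffers */
	struct buf a = { 0x1000 }, b = { 0x2000 };
	struct reloc relocs[] = { { &cmds[0], &a }, { &cmds[1], &b } };

	apply_relocs(relocs, 2);
	printf("0x%x 0x%x\n", (unsigned)cmds[0], (unsigned)cmds[1]);
	/* prints: 0x1010 0x2020 */
	return 0;
}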
-/**
- * vmw_resource_list_unrefererence - Free up a resource list and unreference
- * all resources referenced by it.
- *
- * @list: The resource list.
- */
-static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
- struct list_head *list)
-{
- struct vmw_resource_val_node *val, *val_next;
-
- /*
- * Drop references to resources held during command submission.
- */
-
- list_for_each_entry_safe(val, val_next, list, head) {
- list_del_init(&val->head);
- vmw_resource_unreference(&val->res);
-
- if (val->staged_bindings) {
- if (val->staged_bindings != sw_context->staged_bindings)
- vmw_binding_state_free(val->staged_bindings);
- else
- sw_context->staged_bindings_inuse = false;
- val->staged_bindings = NULL;
- }
-
- kfree(val);
- }
-}
-
-static void vmw_clear_validations(struct vmw_sw_context *sw_context)
-{
- struct vmw_validate_buffer *entry, *next;
- struct vmw_resource_val_node *val;
-
- /*
- * Drop references to DMA buffers held during command submission.
- */
- list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- base.head) {
- list_del(&entry->base.head);
- ttm_bo_unref(&entry->base.bo);
- (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
- sw_context->cur_val_buf--;
- }
- BUG_ON(sw_context->cur_val_buf != 0);
-
- list_for_each_entry(val, &sw_context->resource_list, head)
- (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
-}
-
-int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
-{
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
- struct ttm_operation_ctx ctx = { interruptible, true };
- int ret;
-
- if (vbo->pin_count > 0)
- return 0;
-
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
- if (likely(ret == 0 || ret == -ERESTARTSYS))
- return ret;
-
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
- return ret;
-}
-
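The removed vmw_validate_single_buffer() encoded a placement-fallback policy: MOB placement if requested, otherwise VRAM-or-GMR, then VRAM with eviction as a last resort, stopping early on -ERESTARTSYS. A generic userspace sketch of that try-in-order idea, with -EINTR standing in for the "fatal, stop retrying" case:

#include <errno.h>
#include <stdio.h>

typedef int (*place_fn)(void);

static int try_fast(void) { return -ENOMEM; }	/* e.g. VRAM/GMR full */
static int try_evict(void) { return 0; }	/* evicts others, then fits */

static int validate(const place_fn *order, int n)
{
	int ret = -ENOMEM;

	for (int i = 0; i < n; i++) {
		ret = order[i]();
		if (ret == 0 || ret == -EINTR)
			break;	/* success, or a signal: don't keep trying */
	}
	return ret;
}

int main(void)
{
	const place_fn order[] = { try_fast, try_evict };

	printf("ret = %d\n", validate(order, 2));	/* ret = 0 */
	return 0;
}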
-static int vmw_validate_buffers(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context)
-{
- struct vmw_validate_buffer *entry;
- int ret;
-
- list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
- true,
- entry->validate_as_mob);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
-}
-
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size)
{
if (sw_context->dx_ctx_node)
cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
- sw_context->dx_ctx_node->res->id);
+ sw_context->dx_ctx_node->ctx->id);
else
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) {
u32 command_size,
struct vmw_sw_context *sw_context)
{
- u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
id, false, header);
struct vmw_sw_context *sw_context,
uint32_t handle)
{
- struct vmw_resource_val_node *ctx_node;
struct vmw_resource *res;
int ret;
return ret;
}
- ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+ ret = vmw_resource_val_add(sw_context, res);
if (unlikely(ret != 0))
goto out_err;
- sw_context->dx_ctx_node = ctx_node;
+ sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
sw_context->man = vmw_context_res_man(res);
out_err:
vmw_resource_unreference(&res);
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_resource *error_resource;
- struct list_head resource_list;
struct vmw_cmdbuf_header *header;
- struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
-
+ DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
- sw_context->cur_val_buf = 0;
- INIT_LIST_HEAD(&sw_context->resource_list);
- INIT_LIST_HEAD(&sw_context->ctx_resource_list);
+ INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
- INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
- INIT_LIST_HEAD(&resource_list);
+ sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
- if (unlikely(ret != 0)) {
- list_splice_init(&sw_context->ctx_resource_list,
- &sw_context->resource_list);
+ if (unlikely(ret != 0))
goto out_err_nores;
- }
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
- /*
- * Merge the resource lists before checking the return status
- * from vmd_cmd_check_all so that all the open hashtabs will
- * be handled properly even if vmw_cmd_check_all fails.
- */
- list_splice_init(&sw_context->ctx_resource_list,
- &sw_context->resource_list);
-
if (unlikely(ret != 0))
goto out_err_nores;
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
- true, NULL);
+ ret = vmw_validation_bo_reserve(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err_nores;
- ret = vmw_validate_buffers(dev_priv, sw_context);
+ ret = vmw_validation_bo_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
- ret = vmw_resources_validate(sw_context);
+ ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
+ vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resources_unreserve(sw_context, false);
+ vmw_execbuf_bindings_commit(sw_context, false);
+ vmw_bind_dx_query_mob(sw_context);
+ vmw_validation_res_unreserve(&val_ctx, false);
- ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
- (void *) fence);
+ vmw_validation_bo_fence(sw_context->ctx, fence);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
- vmw_clear_validations(sw_context);
-
/*
* If anything fails here, give up trying to export the fence
* and do a sync since the user mode will not be able to sync
vmw_fence_obj_unreference(&fence);
}
- list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(sw_context, &resource_list);
+ vmw_validation_unref_lists(&val_ctx);
return 0;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
- ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+ vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
- vmw_resources_unreserve(sw_context, true);
+ vmw_execbuf_bindings_commit(sw_context, true);
+ vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
- vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
- list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
+ vmw_validation_drop_ht(&val_ctx);
+ WARN_ON(!list_empty(&sw_context->ctx_list));
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(sw_context, &resource_list);
+ vmw_validation_unref_lists(&val_ctx);
if (unlikely(error_resource != NULL))
vmw_resource_unreference(&error_resource);
out_free_header:
struct vmw_fence_obj *fence)
{
int ret = 0;
- struct list_head validate_list;
- struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL;
- struct ww_acquire_ctx ticket;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- INIT_LIST_HEAD(&validate_list);
-
- pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
- pinned_val.shared = false;
- list_add_tail(&pinned_val.head, &validate_list);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
+ false);
+ if (ret)
+ goto out_no_reserve;
- query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
- query_val.shared = false;
- list_add_tail(&query_val.head, &validate_list);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
+ false);
+ if (ret)
+ goto out_no_reserve;
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
- false, NULL);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
+ ret = vmw_validation_bo_reserve(&val_ctx, false);
+ if (ret)
goto out_no_reserve;
- }
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
+ if (ret)
goto out_no_emit;
- }
dev_priv->query_cid_valid = false;
}
NULL);
fence = lfence;
}
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
+ vmw_validation_bo_fence(&val_ctx, fence);
if (lfence != NULL)
vmw_fence_obj_unreference(&lfence);
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
+ vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
out_no_emit:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
+ vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
+ vmw_validation_unref_lists(&val_ctx);
+ vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
}
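The error handling above follows the usual kernel unwind ladder: each failure jumps to a label that undoes only the steps already completed, and both paths end by releasing the validation lists. A minimal self-contained illustration of the idiom, with made-up steps:

#include <stdio.h>

static int step_reserve(void) { return 0; }
static int step_emit(void) { return -1; }	/* force the error path */
static void undo_reserve(void) { puts("backoff reservation"); }
static void release_lists(void) { puts("release lists"); }

static int submit(void)
{
	int ret = step_reserve();

	if (ret)
		goto out_no_reserve;
	ret = step_emit();
	if (ret)
		goto out_no_emit;
	release_lists();	/* the success path also releases */
	return 0;

out_no_emit:
	undo_reserve();		/* undo only what was set up */
out_no_reserve:
	release_lists();
	return ret;
}

int main(void)
{
	return submit() ? 1 : 0;
}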
/**