Add the capability to query information from a submit queue.
The first available parameter is for querying the number of GPU faults
(hangs) that can be attributed to the queue.
This is useful for implementing context robustness. A user context can
regularly query the number of faults to see if it is responsible for
any and, if so, invalidate itself.
This is also helpful for testing, since it lets the user driver
confirm whether a particular command stream caused a fault (or not, as
the case may be).
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
* MSM_GEM_INFO ioctl.
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 4
+#define MSM_VERSION_MINOR 5
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
args->flags, &args->id);
}
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return msm_submitqueue_query(dev, file->driver_priv, data);
+}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
if (submit) {
struct task_struct *task;
- /* Increment the fault count */
+ /* Increment the fault counts */
gpu->global_faults++;
+ submit->queue->faults++;
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+ struct drm_msm_submitqueue_query *args)
+{
+ size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+ int ret;
+
+ /* If a zero length was passed in, return the data size we expect */
+ if (!args->len) {
+ args->len = sizeof(queue->faults);
+ return 0;
+ }
+
+ /* Set the length to the actual size of the data */
+ args->len = size;
+
+ ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+ return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args)
+{
+ struct msm_gpu_submitqueue *queue;
+ int ret = -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ queue = msm_submitqueue_get(ctx, args->id);
+ if (!queue)
+ return -ENOENT;
+
+ if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+ ret = msm_submitqueue_query_faults(queue, args);
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
__u32 id; /* out, identifier */
};
+#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
+
+struct drm_msm_submitqueue_query {
+ __u64 data; /* in, pointer to the buffer that receives the data */
+ __u32 id; /* in, id of the queue to query */
+ __u32 param; /* in, MSM_SUBMITQUEUE_PARAM_x */
+ __u32 len; /* in/out, size of the data buffer in bytes */
+ __u32 pad; /* must be zero */
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
*/
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
+#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
#if defined(__cplusplus)
}