*
*/
+#include <linux/debugfs.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name)
{
struct sync_timeline *obj;
+ unsigned long flags;
if (size < sizeof(struct sync_timeline))
return NULL;
INIT_LIST_HEAD(&obj->active_list_head);
spin_lock_init(&obj->active_list_lock);
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
return obj;
}
+static void sync_timeline_free(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_del(&obj->sync_timeline_list);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ kfree(obj);
+}
+
void sync_timeline_destroy(struct sync_timeline *obj)
{
unsigned long flags;
spin_unlock_irqrestore(&obj->child_list_lock, flags);
if (needs_freeing)
- kfree(obj);
+ sync_timeline_free(obj);
else
sync_timeline_signal(obj);
}
spin_unlock_irqrestore(&obj->child_list_lock, flags);
if (needs_freeing)
- kfree(obj);
+ sync_timeline_free(obj);
}
void sync_timeline_signal(struct sync_timeline *obj)
static struct sync_fence *sync_fence_alloc(const char *name)
{
struct sync_fence *fence;
+ unsigned long flags;
fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
if (fence == NULL)
spin_lock_init(&fence->waiter_list_lock);
init_waitqueue_head(&fence->wq);
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
return fence;
err:
static int sync_fence_release(struct inode *inode, struct file *file)
{
struct sync_fence *fence = file->private_data;
+ unsigned long flags;
sync_fence_free_pts(fence);
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_del(&fence->sync_fence_list);
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
kfree(fence);
return 0;
case SYNC_IOC_MERGE:
return sync_fence_ioctl_merge(fence, arg);
+
default:
return -ENOTTY;
}
}
+#ifdef CONFIG_DEBUG_FS
+static const char *sync_status_str(int status)
+{
+ if (status > 0)
+ return "signaled";
+ else if (status == 0)
+ return "active";
+ else
+ return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+ int status = pt->status;
+ seq_printf(s, " %s%spt %s",
+ fence ? pt->parent->name : "",
+ fence ? "_" : "",
+ sync_status_str(status));
+ if (pt->status) {
+ struct timeval tv = ktime_to_timeval(pt->timestamp);
+ seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+ }
+
+ if (pt->parent->ops->print_pt) {
+ seq_printf(s, ": ");
+ pt->parent->ops->print_pt(s, pt);
+ }
+
+ seq_printf(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+ if (obj->ops->print_obj) {
+ seq_printf(s, ": ");
+ obj->ops->print_obj(s, obj);
+ }
+
+ seq_printf(s, "\n");
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ list_for_each(pos, &obj->child_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, child_list);
+ sync_print_pt(s, pt, false);
+ }
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
+
+ list_for_each(pos, &fence->pt_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, pt_list);
+ sync_print_pt(s, pt, true);
+ }
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+ list_for_each(pos, &fence->waiter_list_head) {
+ struct sync_fence_waiter *waiter =
+ container_of(pos, struct sync_fence_waiter,
+ waiter_list);
+
+ seq_printf(s, "waiter %pF %p\n", waiter->callback,
+ waiter->callback_data);
+ }
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+ unsigned long flags;
+ struct list_head *pos;
+
+ seq_printf(s, "objs:\n--------------\n");
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_for_each(pos, &sync_timeline_list_head) {
+ struct sync_timeline *obj =
+ container_of(pos, struct sync_timeline,
+ sync_timeline_list);
+
+ sync_print_obj(s, obj);
+ seq_printf(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+ seq_printf(s, "fences:\n--------------\n");
+
+ spin_lock_irqsave(&sync_fence_list_lock, flags);
+ list_for_each(pos, &sync_fence_list_head) {
+ struct sync_fence *fence =
+ container_of(pos, struct sync_fence, sync_fence_list);
+
+ sync_print_fence(s, fence);
+ seq_printf(s, "\n");
+ }
+ spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+ return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+ .open = sync_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+ debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+ return 0;
+}
+
+late_initcall(sync_debugfs_init);
+
+#endif
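
With CONFIG_DEBUG_FS enabled, the file registered above appears as "sync" at the debugfs root. A minimal userspace sketch for dumping it is below; the /sys/kernel/debug mount point is an assumption about where debugfs is mounted, not something this patch establishes.

	/* Illustrative only: dump the sync debugfs file to stdout.
	 * Assumes debugfs is mounted at /sys/kernel/debug. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/sync", "r");
		char buf[256];

		if (!f) {
			perror("fopen /sys/kernel/debug/sync");
			return 1;
		}
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}
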
* -1 if a will signal before b
* @free_pt: called before sync_pt is freed
* @release_obj: called before sync_timeline is freed
+ * @print_obj: print additional debug information about sync_timeline.
+ *             should not print a newline
+ * @print_pt: print additional debug information about sync_pt.
+ *            should not print a newline
*/
struct sync_timeline_ops {
const char *driver_name;
/* optional */
void (*release_obj)(struct sync_timeline *sync_timeline);
+
+ /* optional */
+ void (*print_obj)(struct seq_file *s,
+ struct sync_timeline *sync_timeline);
+
+ /* optional */
+ void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
};
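
As a rough illustration of how a driver might wire up the optional print_obj/print_pt hooks above, consider the sketch below. All example_* names are hypothetical and not part of this patch; the only requirements taken from the documentation are the hook signatures and that neither hook prints a trailing newline.

	/*
	 * Illustrative sketch only: a hypothetical driver whose sync_pt
	 * waits for a monotonically increasing u32 counter on its timeline.
	 */
	struct example_timeline {
		struct sync_timeline obj;
		u32 value;		/* last signaled counter value */
	};

	struct example_pt {
		struct sync_pt pt;
		u32 value;		/* counter value this pt waits for */
	};

	static void example_print_obj(struct seq_file *s,
				      struct sync_timeline *sync_timeline)
	{
		struct example_timeline *timeline =
			container_of(sync_timeline, struct example_timeline, obj);

		/* print the current timeline value, no newline */
		seq_printf(s, "%u", timeline->value);
	}

	static void example_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
	{
		struct example_pt *pt =
			container_of(sync_pt, struct example_pt, pt);
		struct example_timeline *timeline =
			container_of(sync_pt->parent, struct example_timeline, obj);

		/* print "waited-for value / current timeline value" */
		seq_printf(s, "%u / %u", pt->value, timeline->value);
	}

	static struct sync_timeline_ops example_timeline_ops = {
		.driver_name	= "example",
		/* mandatory ops (dup, has_signaled, compare, ...) omitted */
		.print_obj	= example_print_obj,
		.print_pt	= example_print_pt,
	};
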
/**
* @child_list_lock: lock protecting @child_list_head, destroyed, and
* sync_pt.status
* @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
*/
struct sync_timeline {
const struct sync_timeline_ops *ops;
struct list_head active_list_head;
spinlock_t active_list_lock;
+
+ struct list_head sync_timeline_list;
};
/**
* @status: 1: signaled, 0: active, <0: error
*
* @wq: wait queue for fence signaling
+ * @sync_fence_list: membership in global fence list
*/
struct sync_fence {
struct file *file;
int status;
wait_queue_head_t wq;
+
+ struct list_head sync_fence_list;
};
/**
*/
int sync_fence_wait(struct sync_fence *fence, long timeout);
-/* useful for sync driver's debug print handlers */
-const char *sync_status_str(int status);
-
#endif /* __KERNEL__ */
/**