#endif /* CONFIG_COMPAT_USB_URB_THREAD_FIX */
+struct workqueue_struct *system_wq __read_mostly;
+struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
+EXPORT_SYMBOL_GPL(system_wq);
+EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
+int compat_schedule_work(struct work_struct *work)
+{
+ return queue_work(system_wq, work);
+}
+EXPORT_SYMBOL_GPL(compat_schedule_work);
+
+int compat_schedule_work_on(int cpu, struct work_struct *work)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+ return queue_work_on(cpu, system_wq, work);
+#else
+ return queue_work(system_wq, work);
+#endif
+}
+EXPORT_SYMBOL_GPL(compat_schedule_work_on);
+
+int compat_schedule_delayed_work(struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work(system_wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(compat_schedule_delayed_work);
+
+int compat_schedule_delayed_work_on(int cpu,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(compat_schedule_delayed_work_on);
+
+void compat_flush_scheduled_work(void)
+{
+ /*
+ * It is debatable which one to flush first. For now we flush our
+ * compat system_wq and then the old kernel's keventd_wq (via
+ * flush_scheduled_work()); if it seems reasonable later we can
+ * flip this around.
+ */
+ flush_workqueue(system_wq);
+ flush_scheduled_work();
+}
+EXPORT_SYMBOL_GPL(compat_flush_scheduled_work);
+
void compat_system_workqueue_create()
{
+ system_wq = alloc_workqueue("events", 0, 0);
+ system_long_wq = alloc_workqueue("events_long", 0, 0);
system_nrt_wq = create_singlethread_workqueue("events_nrt");
- WARN_ON(!system_nrt_wq);
+ BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
}
void compat_system_workqueue_destroy()
{
+ destroy_workqueue(system_wq);
+ destroy_workqueue(system_long_wq);
destroy_workqueue(system_nrt_wq);
}
{
}
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded. There are users which expect relatively
+ * short queue flush time. Don't queue works which can run for too
+ * long.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works. Queue flushing might take relatively long.
+ *
+ * system_nrt_wq is non-reentrant and guarantees that any given work
+ * item is never executed in parallel by multiple CPUs. Queue
+ * flushing might take relatively long.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
void compat_system_workqueue_create(void);
void compat_system_workqueue_destroy(void);
+int compat_schedule_work(struct work_struct *work);
+int compat_schedule_work_on(int cpu, struct work_struct *work);
+int compat_schedule_delayed_work(struct delayed_work *dwork,
+ unsigned long delay);
+int compat_schedule_delayed_work_on(int cpu,
+ struct delayed_work *dwork,
+ unsigned long delay);
+void compat_flush_scheduled_work(void);
+
+#define schedule_work(work) compat_schedule_work(work)
+#define schedule_work_on(cpu, work) compat_schedule_work_on(cpu, work)
+#define schedule_delayed_work(dwork, delay) compat_schedule_delayed_work(dwork, delay)
+#define schedule_delayed_work_on(cpu, dwork, delay) compat_schedule_delayed_work_on(cpu, dwork, delay)
+#define flush_scheduled_work(a) compat_flush_scheduled_work(a)
+
#define br_port_exists(dev) (dev->br_port)
#else