#define create_freezable_workqueue create_freezeable_workqueue
#endif
-#ifndef alloc_ordered_workqueue
-#define alloc_ordered_workqueue(name, flags) create_singlethread_workqueue(name)
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
+#define WQ_HIGHPRI 0
+#define WQ_MEM_RECLAIM 0
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-#define alloc_workqueue(name, flags, max_active) __create_workqueue(name, flags, max_active)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+#define WQ_UNBOUND 0
#endif
-
-#ifndef alloc_workqueue
-#define alloc_workqueue(name, flags, max_active) __create_workqueue(name, flags, max_active, 0)
+#define __WQ_ORDERED 0
+/*
+ * commit b196be89cdc14a88cc637cdad845a75c5886c82d
+ * Author: Tejun Heo <tj@kernel.org>
+ * Date: Tue Jan 10 15:11:35 2012 -0800
+ *
+ * workqueue: make alloc_workqueue() take printf fmt and args for name
+ */
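+/*
+ * The declaration mirrors the 3.3 __alloc_workqueue_key() interface: a
+ * printf-style format string plus arguments for the workqueue name.
+ */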
+struct workqueue_struct *
+backport_alloc_workqueue(const char *fmt, unsigned int flags,
+			 int max_active, struct lock_class_key *key,
+			 const char *lock_name, ...);
+#undef alloc_workqueue
+#ifdef CONFIG_LOCKDEP
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+({ \
+	static struct lock_class_key __key; \
+	const char *__lock_name; \
+ \
+	if (__builtin_constant_p(fmt)) \
+		__lock_name = (fmt); \
+	else \
+		__lock_name = #fmt; \
+ \
+	backport_alloc_workqueue((fmt), (flags), (max_active), \
+				 &__key, __lock_name, ##args); \
+})
+#else
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+	backport_alloc_workqueue((fmt), (flags), (max_active), \
+				 NULL, NULL, ##args)
+#endif
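+/*
+ * With either variant a caller on a pre-3.3 kernel can still write, for
+ * example:
+ *
+ *	wq = alloc_workqueue("%s_wq", WQ_MEM_RECLAIM, 1, name);
+ *
+ * and the name is formatted in backport_alloc_workqueue() rather than by
+ * the kernel itself.
+ */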
+#undef alloc_ordered_workqueue
+#define alloc_ordered_workqueue(fmt, flags, args...) \
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
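+/*
+ * destroy_workqueue() is wrapped as well so that the name buffer kept by
+ * backport_alloc_workqueue() can be freed when the workqueue is torn down.
+ */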
+#define destroy_workqueue backport_destroy_workqueue
+void backport_destroy_workqueue(struct workqueue_struct *wq);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
+#include <linux/workqueue.h>
#include <net/dst.h>
#include <net/xfrm.h>
return n;
}
EXPORT_SYMBOL_GPL(__pskb_copy);
+
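+/*
+ * Kernels before 3.3 keep the name pointer they are given instead of
+ * copying the string, so the name formatted by backport_alloc_workqueue()
+ * must stay allocated for the lifetime of the workqueue.  Each formatted
+ * name is tracked here and freed again in backport_destroy_workqueue().
+ */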
+static DEFINE_SPINLOCK(wq_name_lock);
+static LIST_HEAD(wq_name_list);
+
+struct wq_name {
+	struct list_head list;
+	struct workqueue_struct *wq;
+	char name[24];
+};
+
+struct workqueue_struct *
+backport_alloc_workqueue(const char *fmt, unsigned int flags,
+			 int max_active, struct lock_class_key *key,
+			 const char *lock_name, ...)
+{
+	struct workqueue_struct *wq;
+	struct wq_name *n = kzalloc(sizeof(*n), GFP_KERNEL);
+	va_list args;
+
+	if (!n)
+		return NULL;
+
+	va_start(args, lock_name);
+	vsnprintf(n->name, sizeof(n->name), fmt, args);
+	va_end(args);
+
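+	/*
+	 * Kernels before 2.6.36 only provide the __create_workqueue_key()
+	 * family (single-threaded/freezeable and, from 2.6.28 on, an rt
+	 * argument), so the cmwq flags cannot be passed through there;
+	 * from 2.6.36 on, flags and max_active go to __alloc_workqueue_key().
+	 */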
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+	wq = __create_workqueue_key(n->name, max_active == 1, 0,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+				    0,
+#endif
+				    key, lock_name);
+#else
+	wq = __alloc_workqueue_key(n->name, flags, max_active, key, lock_name);
+#endif
+	if (!wq) {
+		kfree(n);
+		return NULL;
+	}
+
+	n->wq = wq;
+	spin_lock(&wq_name_lock);
+	list_add(&n->list, &wq_name_list);
+	spin_unlock(&wq_name_lock);
+
+	return wq;
+}
+EXPORT_SYMBOL_GPL(backport_alloc_workqueue);
+
+void backport_destroy_workqueue(struct workqueue_struct *wq)
+{
+	struct wq_name *n, *tmp;
+
+	/* call original */
+#undef destroy_workqueue
+	destroy_workqueue(wq);
+
+	spin_lock(&wq_name_lock);
+	list_for_each_entry_safe(n, tmp, &wq_name_list, list) {
+		if (n->wq == wq) {
+			list_del(&n->list);
+			kfree(n);
+			break;
+		}
+	}
+	spin_unlock(&wq_name_lock);
+}
+EXPORT_SYMBOL_GPL(backport_destroy_workqueue);