--- /dev/null
+++ b/drivers/net/imq.c
-@@ -0,0 +1,410 @@
+@@ -0,0 +1,464 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
+ * of IMQ again: http://www.linuximq.net
+ *
+ *
-+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
-+ * the following changes:
++ * 2004/06/30 - New version of the IMQ patch for kernels <= 2.6.7,
++ * including the following changes:
+ *
+ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
+ * - Correction of imq_init_devs() issue that resulted in
+ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
+ * I didn't forget anybody). I apologize again for my lack of time.
+ *
++ *
++ * 2008/06/07 - Changed imq.c to use qdisc_run() instead of
++ * qdisc_restart() and moved the qdisc_run() call to a tasklet
++ * to avoid recursive locking. (Jussi Kivilinna)
++ *
++ *
+ * More info at: http://www.linuximq.net/ (Andre Correa)
+ */
+
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ #include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+#include <net/netfilter/nf_queue.h>
+
-+extern int qdisc_restart1(struct net_device *dev);
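++/* Per-device private data: the tasklet that runs the qdisc for this
++ * device, and a flag guarding against scheduling it more than once. */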
++struct imq_private {
++ struct tasklet_struct tasklet;
++ int tasklet_pending;
++};
+
+static nf_hookfn imq_nf_hook;
+
+#endif
+};
+
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct nf_hook_ops imq_ingress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
-+ return (struct net_device_stats *)dev->priv;
++ return &dev->stats;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
-+ struct nf_queue_entry *info = skb->nf_queue_entry;
-+
-+ if (info) {
-+ if (info->indev)
-+ dev_put(info->indev);
-+ if (info->outdev)
-+ dev_put(info->outdev);
-+ kfree(info);
++ struct nf_queue_entry *entry = skb->nf_queue_entry;
++
++ if (entry) {
++ if (entry->indev)
++ dev_put(entry->indev);
++ if (entry->outdev)
++ dev_put(entry->outdev);
++ kfree(entry);
+ }
+}
+
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
-+ struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
-+
-+ stats->tx_bytes += skb->len;
-+ stats->tx_packets++;
++ dev->stats.tx_bytes += skb->len;
++ dev->stats.tx_packets++;
+
+ skb->imq_flags = 0;
+ skb->destructor = NULL;
+ return 0;
+}
+
-+static int imq_nf_queue(struct nf_queue_entry *info, unsigned queue_num)
++static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
+{
+ struct net_device *dev;
-+ struct net_device_stats *stats;
++ struct imq_private *priv;
+ struct sk_buff *skb2 = NULL;
+ struct Qdisc *q;
-+ unsigned int index = info->skb->imq_flags&IMQ_F_IFMASK;
++ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK;
+ int ret = -1;
+
-+ if (index > numdevs)
++ if (index >= numdevs)
+ return -1;
+
+ dev = imq_devs + index;
++ priv = netdev_priv(dev);
+ if (!(dev->flags & IFF_UP)) {
-+ info->skb->imq_flags = 0;
-+ nf_reinject(info, NF_ACCEPT);
++ entry->skb->imq_flags = 0;
++ nf_reinject(entry, NF_ACCEPT);
+ return 0;
+ }
+ dev->last_rx = jiffies;
+
-+ if (info->skb->destructor) {
-+ skb2 = info->skb;
-+ info->skb = skb_clone(info->skb, GFP_ATOMIC);
-+ if (!info->skb)
++ if (entry->skb->destructor) {
++ skb2 = entry->skb;
++ entry->skb = skb_clone(entry->skb, GFP_ATOMIC);
++ if (!entry->skb)
+ return -1;
+ }
-+ info->skb->nf_queue_entry = info;
++ entry->skb->nf_queue_entry = entry;
+
-+ stats = (struct net_device_stats *)dev->priv;
-+ stats->rx_bytes+= info->skb->len;
-+ stats->rx_packets++;
++ dev->stats.rx_bytes += entry->skb->len;
++ dev->stats.rx_packets++;
+
+ spin_lock_bh(&dev->queue_lock);
+ q = dev->qdisc;
+ if (q->enqueue) {
-+ q->enqueue(skb_get(info->skb), q);
-+ if (skb_shared(info->skb)) {
-+ info->skb->destructor = imq_skb_destructor;
-+ kfree_skb(info->skb);
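++ /* Hold an extra reference across enqueue; if the qdisc kept the
++ * skb it is still shared, so attach the destructor that frees
++ * the nf_queue entry and drop our reference. */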
++ q->enqueue(skb_get(entry->skb), q);
++ if (skb_shared(entry->skb)) {
++ entry->skb->destructor = imq_skb_destructor;
++ kfree_skb(entry->skb);
+ ret = 0;
+ }
+ }
-+ if (spin_is_locked(&dev->_xmit_lock))
-+ netif_schedule(dev);
-+ else
-+ while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
-+ /* NOTHING */;
+
+ spin_unlock_bh(&dev->queue_lock);
+
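++ /* Defer qdisc_run() to the per-device tasklet instead of calling it
++ * here, avoiding recursive locking (see the 2008/06/07 changelog);
++ * the pending bit prevents scheduling the tasklet twice. */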
++ if (!test_and_set_bit(1, &priv->tasklet_pending))
++ tasklet_schedule(&priv->tasklet);
++
+ if (skb2)
-+ kfree_skb(ret ? info->skb : skb2);
++ kfree_skb(ret ? entry->skb : skb2);
+
+ return ret;
+}
+ .outfn = imq_nf_queue,
+};
+
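++/* Tasklet handler: runs the device qdisc under dev->queue_lock, then
++ * clears the pending bit so imq_nf_queue() may schedule it again. */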
++static void qdisc_run_tasklet(unsigned long arg)
++{
++ struct net_device *dev = (struct net_device *)arg;
++ struct imq_private *priv = netdev_priv(dev);
++
++ spin_lock(&dev->queue_lock);
++ qdisc_run(dev);
++ spin_unlock(&dev->queue_lock);
++
++ clear_bit(1, &priv->tasklet_pending);
++}
++
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ return NF_ACCEPT;
+}
+
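++/* imq_open()/imq_close() bracket the device's up state: the tasklet is
++ * initialised when the device is brought up and killed before the
++ * queue is stopped on shutdown. */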
++static int imq_close(struct net_device *dev)
++{
++ struct imq_private *priv = netdev_priv(dev);
++
++ tasklet_kill(&priv->tasklet);
++ netif_stop_queue(dev);
++
++ return 0;
++}
++
++static int imq_open(struct net_device *dev)
++{
++ struct imq_private *priv = netdev_priv(dev);
++
++ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev);
++ netif_start_queue(dev);
++
++ return 0;
++}
+
+static int __init imq_init_hooks(void)
+{
+ int err;
+
+ err = nf_register_queue_handler(PF_INET, &nfqh);
-+ if (err > 0)
++ if (err)
+ goto err1;
-+ if ((err = nf_register_hook(&imq_ingress_ipv4)))
++
++ err = nf_register_hook(&imq_ingress_ipv4);
++ if (err)
+ goto err2;
-+ if ((err = nf_register_hook(&imq_egress_ipv4)))
++
++ err = nf_register_hook(&imq_egress_ipv4);
++ if (err)
+ goto err3;
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-+ if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ err = nf_register_queue_handler(PF_INET6, &nfqh);
++ if (err)
+ goto err4;
-+ if ((err = nf_register_hook(&imq_ingress_ipv6)))
++
++ err = nf_register_hook(&imq_ingress_ipv6);
++ if (err)
+ goto err5;
-+ if ((err = nf_register_hook(&imq_egress_ipv6)))
++
++ err = nf_register_hook(&imq_egress_ipv6);
++ if (err)
+ goto err6;
+#endif
+
+ return 0;
+
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+err6:
+ nf_unregister_hook(&imq_ingress_ipv6);
+err5:
+
+static void __exit imq_unhook(void)
+{
-+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ nf_unregister_hook(&imq_ingress_ipv6);
+ nf_unregister_hook(&imq_egress_ipv6);
+ nf_unregister_queue_handler(PF_INET6, &nfqh);
+static int __init imq_dev_init(struct net_device *dev)
+{
+ dev->hard_start_xmit = imq_dev_xmit;
++ dev->open = imq_open;
++ dev->get_stats = imq_get_stats;
++ dev->stop = imq_close;
+ dev->type = ARPHRD_VOID;
+ dev->mtu = 16000;
+ dev->tx_queue_len = 11000;
+ dev->flags = IFF_NOARP;
-+ dev->priv = kzalloc(sizeof(struct net_device_stats), GFP_KERNEL);
++
++ dev->priv = kzalloc(sizeof(struct imq_private), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
-+ dev->get_stats = imq_get_stats;
+
+ return 0;
+}
+static int __init imq_init_devs(struct net *net)
+{
+ struct net_device *dev;
-+ int i,j;
-+ j = numdevs;
++ int i, j;
+
+ if (!numdevs || numdevs > IMQ_MAX_DEVS) {
+ printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
+ return -ENOMEM;
+
+ /* we start counting at zero */
-+ numdevs--;
++ j = numdevs - 1;
+
-+ for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
++ for (i = 0, dev = imq_devs; i <= j; i++, dev++) {
+ strcpy(dev->name, "imq%d");
+ dev->init = imq_dev_init;
+ dev->uninit = imq_dev_uninit;
+ if (register_netdev(dev) < 0)
+ goto err_register;
+ }
-+ printk(KERN_INFO "IMQ starting with %u devices...\n", j);
++ printk(KERN_INFO "IMQ starting with %u devices...\n", numdevs);
+ return 0;
+
+err_register:
+{
+ int err;
+
-+ if ((err = imq_init_devs(net))) {
++ err = imq_init_devs(net);
++ if (err) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
+ return err;
+ }
-+ if ((err = imq_init_hooks())) {
++
++ err = imq_init_hooks();
++ if (err) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
+ imq_cleanup_devs();
+ return err;
+}
+
+static struct pernet_operations __net_initdata imq_net_ops = {
-+ .init = imq_init_module,
-+ .exit = imq_exit_module,
++ .init = imq_init_module,
++ .exit = imq_exit_module,
+};
-+
++
+static int __init imq_init(void)
+{
-+ return register_pernet_device(&imq_net_ops);
++ return register_pernet_device(&imq_net_ops);
+}
+
+module_init(imq_init);
-+//module_exit(imq_cleanup_module);
++/*module_exit(imq_cleanup_module);*/
+
+module_param(numdevs, int, 0);
-+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
++MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
++ "be created)");
+MODULE_AUTHOR("http://www.linuximq.net");
-+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
++MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
++ "http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
++
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -117,6 +117,129 @@
select CRC32
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
-@@ -142,6 +142,7 @@
+@@ -143,6 +143,7 @@
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_DUMMY) += dummy.o
+#endif /* _IP6T_IMQ_H */
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -300,6 +300,10 @@
+@@ -296,6 +296,10 @@
struct nf_conntrack *nfct;
struct sk_buff *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
-@@ -1633,6 +1637,10 @@
+@@ -1736,6 +1740,10 @@
dst->nfct_reasm = src->nfct_reasm;
nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
-@@ -1545,7 +1548,11 @@
+@@ -1537,7 +1540,11 @@
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
if (likely(!skb->next)) {
+config IP_NF_TARGET_IMQ
+ tristate "IMQ target support"
-+ depends on IP_NF_MANGLE && IMQ
++ depends on IP_NF_MANGLE
+ help
-+ This option adds a `IMQ' target which is used to specify if and
++ This option adds an `IMQ' target which is used to specify if and
+ to which IMQ device packets should get enqueued/dequeued.
depends on IP_NF_FILTER
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
-@@ -58,6 +58,7 @@
+@@ -55,6 +55,7 @@
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
+config IP6_NF_TARGET_IMQ
+ tristate "IMQ target support"
-+ depends on IP6_NF_MANGLE && IMQ
++ depends on IP6_NF_MANGLE
+ help
-+ This option adds a `IMQ' target which is used to specify if and
-+ to which imq device packets should get enqueued/dequeued.
++ This option adds an `IMQ' target which is used to specify if and
++ to which IMQ device packets should get enqueued/dequeued.
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -182,6 +182,12 @@
- return ret;
+@@ -203,6 +203,7 @@
+
+ clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}
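++/* qdisc_run() is an inline wrapper around __qdisc_run(); export it so
++ * the IMQ module can use qdisc_run() from its tasklet. */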
++EXPORT_SYMBOL(__qdisc_run);
-+int qdisc_restart1(struct net_device *dev)
-+{
-+ return qdisc_restart(dev);
-+}
-+EXPORT_SYMBOL(qdisc_restart1);
-+
- void __qdisc_run(struct net_device *dev)
+ static void dev_watchdog(unsigned long arg)
{
- unsigned long start_time = jiffies;