From: Andy Boyett
Date: Fri, 15 May 2009 10:35:56 +0000 (+0000)
Subject: generic-2.4: renumber netfilter patches
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=94921686de5f1ded59d5b3bc79812b31bf4fca48;p=openwrt%2Fstaging%2Flynxis%2Fomap.git

generic-2.4: renumber netfilter patches

Both the netfilter_random and tc_esfq patches were numbered 621 in the patch
series; bumped the index of tc_esfq and all following 62x patches by one.

SVN-Revision: 15859
---

diff --git a/target/linux/generic-2.4/patches/621-tc_esfq.patch b/target/linux/generic-2.4/patches/621-tc_esfq.patch
deleted file mode 100644
index 2a05f03456..0000000000
--- a/target/linux/generic-2.4/patches/621-tc_esfq.patch
+++ /dev/null
@@ -1,748 +0,0 @@
-Index: linux-2.4.35.4/Documentation/Configure.help
-===================================================================
---- linux-2.4.35.4.orig/Documentation/Configure.help
-+++ linux-2.4.35.4/Documentation/Configure.help
-@@ -11153,6 +11153,24 @@ CONFIG_NET_SCH_HFSC
- whenever you want). If you want to compile it as a module, say M
- here and read .
-
-+ESFQ queue
-+CONFIG_NET_SCH_ESFQ
-+ Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
-+ packet scheduling algorithm for some of your network devices or as a
-+ leaf discipline for the CBQ scheduling algorithm (see the top of
-+ for details and references about the SFQ
-+ algorithm).
-+
-+ This is an enchanced SFQ version which allows you to control the
-+ hardcoded values in the SFQ scheduler: queue depth, hash table size,
-+ queues limit. Also adds control to the hash function used to identify
-+ packet flows. Hash by src or dst ip and original sfq hash.
-+
-+ This code is also available as a module called sch_esfq.o ( = code
-+ which can be inserted in and removed from the running kernel
-+ whenever you want). If you want to compile it as a module, say M
-+ here and read .
-+
- CSZ packet scheduler
- CONFIG_NET_SCH_CSZ
- Say Y here if you want to use the Clark-Shenker-Zhang (CSZ) packet
-Index: linux-2.4.35.4/include/linux/pkt_sched.h
-===================================================================
---- linux-2.4.35.4.orig/include/linux/pkt_sched.h
-+++ linux-2.4.35.4/include/linux/pkt_sched.h
-@@ -173,8 +173,36 @@ struct tc_sfq_qopt
- *
- * The only reason for this is efficiency, it is possible
- * to change these parameters in compile time.
-+ *
-+ * If you need to play with these values use esfq instead.
- */ - -+/* ESFQ section */ -+ -+enum -+{ -+ /* traditional */ -+ TCA_SFQ_HASH_CLASSIC, -+ TCA_SFQ_HASH_DST, -+ TCA_SFQ_HASH_SRC, -+ /* conntrack */ -+ TCA_SFQ_HASH_CTORIGDST, -+ TCA_SFQ_HASH_CTORIGSRC, -+ TCA_SFQ_HASH_CTREPLDST, -+ TCA_SFQ_HASH_CTREPLSRC, -+ TCA_SFQ_HASH_CTNATCHG, -+}; -+ -+struct tc_esfq_qopt -+{ -+ unsigned quantum; /* Bytes per round allocated to flow */ -+ int perturb_period; /* Period of hash perturbation */ -+ __u32 limit; /* Maximal packets in queue */ -+ unsigned divisor; /* Hash divisor */ -+ unsigned flows; /* Maximal number of flows */ -+ unsigned hash_kind; /* Hash function to use for flow identification */ -+}; -+ - /* RED section */ - - enum -Index: linux-2.4.35.4/net/sched/Config.in -=================================================================== ---- linux-2.4.35.4.orig/net/sched/Config.in -+++ linux-2.4.35.4/net/sched/Config.in -@@ -12,6 +12,7 @@ fi - tristate ' The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO - tristate ' RED queue' CONFIG_NET_SCH_RED - tristate ' SFQ queue' CONFIG_NET_SCH_SFQ -+tristate ' ESFQ queue' CONFIG_NET_SCH_ESFQ - tristate ' TEQL queue' CONFIG_NET_SCH_TEQL - tristate ' TBF queue' CONFIG_NET_SCH_TBF - tristate ' GRED queue' CONFIG_NET_SCH_GRED -Index: linux-2.4.35.4/net/sched/Makefile -=================================================================== ---- linux-2.4.35.4.orig/net/sched/Makefile -+++ linux-2.4.35.4/net/sched/Makefile -@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o - obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o - obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o - obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o -+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o - obj-$(CONFIG_NET_SCH_RED) += sch_red.o - obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o - obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o -Index: linux-2.4.35.4/net/sched/sch_esfq.c -=================================================================== ---- /dev/null -+++ linux-2.4.35.4/net/sched/sch_esfq.c -@@ -0,0 +1,649 @@ -+/* -+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version -+ * 2 of the License, or (at your option) any later version. -+ * -+ * Authors: Alexey Kuznetsov, -+ * -+ * Changes: Alexander Atanasov, -+ * Added dynamic depth,limit,divisor,hash_kind options. -+ * Added dst and src hashes. -+ * -+ * Alexander Clouter, -+ * Ported ESFQ to Linux 2.6. -+ * -+ * Corey Hickey, -+ * Maintenance of the Linux 2.6 port. -+ * Added fwmark hash (thanks to Robert Kurjata). -+ * Added usage of jhash. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define IPPROTO_DCCP 33 -+#define qdisc_priv(q) ((void *)(q->data)) -+ -+#ifdef CONFIG_IP_NF_CONNTRACK -+/* #include */ -+#include -+#endif -+ -+/* Stochastic Fairness Queuing algorithm. -+ For more comments look at sch_sfq.c. -+ The difference is that you can change limit, depth, -+ hash table size and choose alternate hash types. 
-+ -+ classic: same as in sch_sfq.c -+ dst: destination IP address -+ src: source IP address -+ ctorigdst: original destination IP address -+ ctorigsrc: original source IP address -+ ctrepldst: reply destination IP address -+ ctreplsrc: reply source IP -+ ctnatchg: use the address which changed via nat -+ -+*/ -+ -+ -+/* This type should contain at least SFQ_DEPTH*2 values */ -+typedef unsigned int esfq_index; -+ -+struct esfq_head -+{ -+ esfq_index next; -+ esfq_index prev; -+}; -+ -+struct esfq_sched_data -+{ -+/* Parameters */ -+ int perturb_period; -+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */ -+ int limit; -+ unsigned depth; -+ unsigned hash_divisor; -+ unsigned hash_kind; -+/* Variables */ -+ struct timer_list perturb_timer; -+ int perturbation; -+ esfq_index tail; /* Index of current slot in round */ -+ esfq_index max_depth; /* Maximal depth */ -+ -+ esfq_index *ht; /* Hash table */ -+ esfq_index *next; /* Active slots link */ -+ short *allot; /* Current allotment per slot */ -+ unsigned short *hash; /* Hash value indexed by slots */ -+ struct sk_buff_head *qs; /* Slot queue */ -+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */ -+ unsigned dyn_min; /* For dynamic divisor adjustment; minimum value seen */ -+ unsigned dyn_max; /* maximum value seen */ -+ unsigned dyn_range; /* saved range */ -+}; -+ -+/* This contains the info we will hash. */ -+struct esfq_packet_info -+{ -+ u32 proto; /* protocol or port */ -+ u32 src; /* source from packet header */ -+ u32 dst; /* destination from packet header */ -+ u32 ctorigsrc; /* original source from conntrack */ -+ u32 ctorigdst; /* original destination from conntrack */ -+ u32 ctreplsrc; /* reply source from conntrack */ -+ u32 ctrepldst; /* reply destination from conntrack */ -+}; -+ -+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a) -+{ -+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1); -+} -+ -+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b) -+{ -+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1); -+} -+ -+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c) -+{ -+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1); -+} -+ -+ -+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb) -+{ -+ struct esfq_packet_info info; -+#ifdef CONFIG_IP_NF_CONNTRACK -+ enum ip_conntrack_info ctinfo; -+ struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo); -+#endif -+ -+ switch (skb->protocol) { -+ case __constant_htons(ETH_P_IP): -+ { -+ struct iphdr *iph = skb->nh.iph; -+ info.dst = iph->daddr; -+ info.src = iph->saddr; -+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && -+ (iph->protocol == IPPROTO_TCP || -+ iph->protocol == IPPROTO_UDP || -+ iph->protocol == IPPROTO_SCTP || -+ iph->protocol == IPPROTO_DCCP || -+ iph->protocol == IPPROTO_ESP)) -+ info.proto = *(((u32*)iph) + iph->ihl); -+ else -+ info.proto = iph->protocol; -+ break; -+ } -+ default: -+ info.dst = (u32)(unsigned long)skb->dst; -+ info.src = (u32)(unsigned long)skb->sk; -+ info.proto = skb->protocol; -+ } -+ -+#ifdef CONFIG_IP_NF_CONNTRACK -+ /* defaults if there is no conntrack info */ -+ info.ctorigsrc = info.src; -+ info.ctorigdst = info.dst; -+ info.ctreplsrc = info.dst; -+ info.ctrepldst = info.src; -+ /* collect conntrack info */ -+ IP_NF_ASSERT(ct); -+ if (ct) { -+ if (skb->protocol == __constant_htons(ETH_P_IP)) { -+ info.ctorigsrc = 
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; -+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip; -+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip; -+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip; -+ } -+ } -+#endif -+ -+ switch(q->hash_kind) -+ { -+ case TCA_SFQ_HASH_CLASSIC: -+ return esfq_jhash_3words(q, info.dst, info.src, info.proto); -+ case TCA_SFQ_HASH_DST: -+ return esfq_jhash_1word(q, info.dst); -+ case TCA_SFQ_HASH_SRC: -+ return esfq_jhash_1word(q, info.src); -+#ifdef CONFIG_IP_NF_CONNTRACK -+ case TCA_SFQ_HASH_CTORIGDST: -+ return esfq_jhash_1word(q, info.ctorigdst); -+ case TCA_SFQ_HASH_CTORIGSRC: -+ return esfq_jhash_1word(q, info.ctorigsrc); -+ case TCA_SFQ_HASH_CTREPLDST: -+ return esfq_jhash_1word(q, info.ctrepldst); -+ case TCA_SFQ_HASH_CTREPLSRC: -+ return esfq_jhash_1word(q, info.ctreplsrc); -+ case TCA_SFQ_HASH_CTNATCHG: -+ { -+ if (info.ctorigdst == info.ctreplsrc) -+ return esfq_jhash_1word(q, info.ctorigsrc); -+ else -+ return esfq_jhash_1word(q, info.ctreplsrc); -+ } -+#endif -+ default: -+ if (net_ratelimit()) -+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n"); -+ } -+ return esfq_jhash_3words(q, info.dst, info.src, info.proto); -+} -+ -+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x) -+{ -+ esfq_index p, n; -+ int d = q->qs[x].qlen + q->depth; -+ -+ p = d; -+ n = q->dep[d].next; -+ q->dep[x].next = n; -+ q->dep[x].prev = p; -+ q->dep[p].next = q->dep[n].prev = x; -+} -+ -+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x) -+{ -+ esfq_index p, n; -+ -+ n = q->dep[x].next; -+ p = q->dep[x].prev; -+ q->dep[p].next = n; -+ q->dep[n].prev = p; -+ -+ if (n == p && q->max_depth == q->qs[x].qlen + 1) -+ q->max_depth--; -+ -+ esfq_link(q, x); -+} -+ -+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x) -+{ -+ esfq_index p, n; -+ int d; -+ -+ n = q->dep[x].next; -+ p = q->dep[x].prev; -+ q->dep[p].next = n; -+ q->dep[n].prev = p; -+ d = q->qs[x].qlen; -+ if (q->max_depth < d) -+ q->max_depth = d; -+ -+ esfq_link(q, x); -+} -+ -+static unsigned int esfq_drop(struct Qdisc *sch) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ esfq_index d = q->max_depth; -+ struct sk_buff *skb; -+ unsigned int len; -+ -+ /* Queue is full! Find the longest slot and -+ drop a packet from it */ -+ -+ if (d > 1) { -+ esfq_index x = q->dep[d+q->depth].next; -+ skb = q->qs[x].prev; -+ len = skb->len; -+ __skb_unlink(skb, &q->qs[x]); -+ kfree_skb(skb); -+ esfq_dec(q, x); -+ sch->q.qlen--; -+ sch->stats.drops++; -+ sch->stats.backlog -= len; -+ return len; -+ } -+ -+ if (d == 1) { -+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. 
*/ -+ d = q->next[q->tail]; -+ q->next[q->tail] = q->next[d]; -+ q->allot[q->next[d]] += q->quantum; -+ skb = q->qs[d].prev; -+ len = skb->len; -+ __skb_unlink(skb, &q->qs[d]); -+ kfree_skb(skb); -+ esfq_dec(q, d); -+ sch->q.qlen--; -+ q->ht[q->hash[d]] = q->depth; -+ sch->stats.drops++; -+ sch->stats.backlog -= len; -+ return len; -+ } -+ -+ return 0; -+} -+ -+static int -+esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ unsigned hash = esfq_hash(q, skb); -+ unsigned depth = q->depth; -+ esfq_index x; -+ -+ x = q->ht[hash]; -+ if (x == depth) { -+ q->ht[hash] = x = q->dep[depth].next; -+ q->hash[x] = hash; -+ } -+ sch->stats.backlog += skb->len; -+ __skb_queue_tail(&q->qs[x], skb); -+ esfq_inc(q, x); -+ if (q->qs[x].qlen == 1) { /* The flow is new */ -+ if (q->tail == depth) { /* It is the first flow */ -+ q->tail = x; -+ q->next[x] = x; -+ q->allot[x] = q->quantum; -+ } else { -+ q->next[x] = q->next[q->tail]; -+ q->next[q->tail] = x; -+ q->tail = x; -+ } -+ } -+ if (++sch->q.qlen < q->limit-1) { -+ sch->stats.bytes += skb->len; -+ sch->stats.packets++; -+ return 0; -+ } -+ -+ esfq_drop(sch); -+ return NET_XMIT_CN; -+} -+ -+static int -+esfq_requeue(struct sk_buff *skb, struct Qdisc* sch) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ unsigned hash = esfq_hash(q, skb); -+ unsigned depth = q->depth; -+ esfq_index x; -+ -+ x = q->ht[hash]; -+ if (x == depth) { -+ q->ht[hash] = x = q->dep[depth].next; -+ q->hash[x] = hash; -+ } -+ sch->stats.backlog += skb->len; -+ __skb_queue_head(&q->qs[x], skb); -+ esfq_inc(q, x); -+ if (q->qs[x].qlen == 1) { /* The flow is new */ -+ if (q->tail == depth) { /* It is the first flow */ -+ q->tail = x; -+ q->next[x] = x; -+ q->allot[x] = q->quantum; -+ } else { -+ q->next[x] = q->next[q->tail]; -+ q->next[q->tail] = x; -+ q->tail = x; -+ } -+ } -+ if (++sch->q.qlen < q->limit - 1) { -+ return 0; -+ } -+ -+ sch->stats.drops++; -+ esfq_drop(sch); -+ return NET_XMIT_CN; -+} -+ -+ -+ -+ -+static struct sk_buff * -+esfq_dequeue(struct Qdisc* sch) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ struct sk_buff *skb; -+ unsigned depth = q->depth; -+ esfq_index a, old_a; -+ -+ /* No active slots */ -+ if (q->tail == depth) -+ return NULL; -+ -+ a = old_a = q->next[q->tail]; -+ -+ /* Grab packet */ -+ skb = __skb_dequeue(&q->qs[a]); -+ esfq_dec(q, a); -+ sch->q.qlen--; -+ sch->stats.backlog -= skb->len; -+ -+ /* Is the slot empty? 
*/ -+ if (q->qs[a].qlen == 0) { -+ q->ht[q->hash[a]] = depth; -+ a = q->next[a]; -+ if (a == old_a) { -+ q->tail = depth; -+ return skb; -+ } -+ q->next[q->tail] = a; -+ q->allot[a] += q->quantum; -+ } else if ((q->allot[a] -= skb->len) <= 0) { -+ q->tail = a; -+ a = q->next[a]; -+ q->allot[a] += q->quantum; -+ } -+ -+ return skb; -+} -+ -+static void -+esfq_reset(struct Qdisc* sch) -+{ -+ struct sk_buff *skb; -+ -+ while ((skb = esfq_dequeue(sch)) != NULL) -+ kfree_skb(skb); -+} -+ -+static void esfq_perturbation(unsigned long arg) -+{ -+ struct Qdisc *sch = (struct Qdisc*)arg; -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ -+ q->perturbation = net_random()&0x1F; -+ -+ if (q->perturb_period) { -+ q->perturb_timer.expires = jiffies + q->perturb_period; -+ add_timer(&q->perturb_timer); -+ } -+} -+ -+static int esfq_change(struct Qdisc *sch, struct rtattr *opt) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ struct tc_esfq_qopt *ctl = RTA_DATA(opt); -+ int old_perturb = q->perturb_period; -+ -+ if (opt->rta_len < RTA_LENGTH(sizeof(*ctl))) -+ return -EINVAL; -+ -+ sch_tree_lock(sch); -+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev); -+ q->perturb_period = ctl->perturb_period*HZ; -+// q->hash_divisor = ctl->divisor; -+// q->tail = q->limit = q->depth = ctl->flows; -+ -+ if (ctl->limit) -+ q->limit = min_t(u32, ctl->limit, q->depth); -+ -+ if (ctl->hash_kind) { -+ q->hash_kind = ctl->hash_kind; -+ if (q->hash_kind != TCA_SFQ_HASH_CLASSIC) -+ q->perturb_period = 0; -+ } -+ -+ // is sch_tree_lock enough to do this ? -+ while (sch->q.qlen >= q->limit-1) -+ esfq_drop(sch); -+ -+ if (old_perturb) -+ del_timer(&q->perturb_timer); -+ if (q->perturb_period) { -+ q->perturb_timer.expires = jiffies + q->perturb_period; -+ add_timer(&q->perturb_timer); -+ } else { -+ q->perturbation = 0; -+ } -+ sch_tree_unlock(sch); -+ return 0; -+} -+ -+static int esfq_init(struct Qdisc *sch, struct rtattr *opt) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ struct tc_esfq_qopt *ctl; -+ esfq_index p = ~0U/2; -+ int i; -+ -+ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl))) -+ return -EINVAL; -+ -+ init_timer(&q->perturb_timer); -+ q->perturb_timer.data = (unsigned long)sch; -+ q->perturb_timer.function = esfq_perturbation; -+ q->perturbation = 0; -+ q->hash_kind = TCA_SFQ_HASH_CLASSIC; -+ q->max_depth = 0; -+ q->dyn_min = ~0U; /* maximum value for this type */ -+ q->dyn_max = 0; /* dyn_min/dyn_max will be set properly upon first packet */ -+ if (opt == NULL) { -+ q->quantum = psched_mtu(sch->dev); -+ q->perturb_period = 0; -+ q->hash_divisor = 1024; -+ q->tail = q->limit = q->depth = 128; -+ -+ } else { -+ ctl = RTA_DATA(opt); -+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev); -+ q->perturb_period = ctl->perturb_period*HZ; -+ q->hash_divisor = ctl->divisor ? : 1024; -+ q->tail = q->limit = q->depth = ctl->flows ? 
: 128; -+ -+ if ( q->depth > p - 1 ) -+ return -EINVAL; -+ -+ if (ctl->limit) -+ q->limit = min_t(u32, ctl->limit, q->depth); -+ -+ if (ctl->hash_kind) { -+ q->hash_kind = ctl->hash_kind; -+ } -+ -+ if (q->perturb_period) { -+ q->perturb_timer.expires = jiffies + q->perturb_period; -+ add_timer(&q->perturb_timer); -+ } -+ } -+ -+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL); -+ if (!q->ht) -+ goto err_case; -+ -+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL); -+ if (!q->dep) -+ goto err_case; -+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL); -+ if (!q->next) -+ goto err_case; -+ -+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL); -+ if (!q->allot) -+ goto err_case; -+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL); -+ if (!q->hash) -+ goto err_case; -+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL); -+ if (!q->qs) -+ goto err_case; -+ -+ for (i=0; i< q->hash_divisor; i++) -+ q->ht[i] = q->depth; -+ for (i=0; idepth; i++) { -+ skb_queue_head_init(&q->qs[i]); -+ q->dep[i+q->depth].next = i+q->depth; -+ q->dep[i+q->depth].prev = i+q->depth; -+ } -+ -+ for (i=0; idepth; i++) -+ esfq_link(q, i); -+ return 0; -+err_case: -+ del_timer(&q->perturb_timer); -+ if (q->ht) -+ kfree(q->ht); -+ if (q->dep) -+ kfree(q->dep); -+ if (q->next) -+ kfree(q->next); -+ if (q->allot) -+ kfree(q->allot); -+ if (q->hash) -+ kfree(q->hash); -+ if (q->qs) -+ kfree(q->qs); -+ return -ENOBUFS; -+} -+ -+static void esfq_destroy(struct Qdisc *sch) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ del_timer(&q->perturb_timer); -+ if(q->ht) -+ kfree(q->ht); -+ if(q->dep) -+ kfree(q->dep); -+ if(q->next) -+ kfree(q->next); -+ if(q->allot) -+ kfree(q->allot); -+ if(q->hash) -+ kfree(q->hash); -+ if(q->qs) -+ kfree(q->qs); -+} -+ -+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb) -+{ -+ struct esfq_sched_data *q = qdisc_priv(sch); -+ unsigned char *b = skb->tail; -+ struct tc_esfq_qopt opt; -+ -+ opt.quantum = q->quantum; -+ opt.perturb_period = q->perturb_period/HZ; -+ -+ opt.limit = q->limit; -+ opt.divisor = q->hash_divisor; -+ opt.flows = q->depth; -+ opt.hash_kind = q->hash_kind; -+ -+ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); -+ -+ return skb->len; -+ -+rtattr_failure: -+ skb_trim(skb, b - skb->data); -+ return -1; -+} -+ -+static struct Qdisc_ops esfq_qdisc_ops = -+{ -+ .next = NULL, -+ .cl_ops = NULL, -+ .id = "esfq", -+ .priv_size = sizeof(struct esfq_sched_data), -+ .enqueue = esfq_enqueue, -+ .dequeue = esfq_dequeue, -+ .requeue = esfq_requeue, -+ .drop = esfq_drop, -+ .init = esfq_init, -+ .reset = esfq_reset, -+ .destroy = esfq_destroy, -+ .change = NULL, /* esfq_change - needs more work */ -+ .dump = esfq_dump, -+}; -+ -+static int __init esfq_module_init(void) -+{ -+ return register_qdisc(&esfq_qdisc_ops); -+} -+static void __exit esfq_module_exit(void) -+{ -+ unregister_qdisc(&esfq_qdisc_ops); -+} -+module_init(esfq_module_init) -+module_exit(esfq_module_exit) -+MODULE_LICENSE("GPL"); diff --git a/target/linux/generic-2.4/patches/622-netfilter_ipset_porthash.patch b/target/linux/generic-2.4/patches/622-netfilter_ipset_porthash.patch deleted file mode 100644 index 0fc1ec6219..0000000000 --- a/target/linux/generic-2.4/patches/622-netfilter_ipset_porthash.patch +++ /dev/null @@ -1,39 +0,0 @@ -Index: linux-2.4.35.4/include/linux/netfilter_ipv4/ip_set_ipporthash.h -=================================================================== ---- /dev/null -+++ 
linux-2.4.35.4/include/linux/netfilter_ipv4/ip_set_ipporthash.h -@@ -0,0 +1,34 @@ -+#ifndef __IP_SET_IPPORTHASH_H -+#define __IP_SET_IPPORTHASH_H -+ -+#include -+ -+#define SETTYPE_NAME "ipporthash" -+#define MAX_RANGE 0x0000FFFF -+#define INVALID_PORT (MAX_RANGE + 1) -+ -+struct ip_set_ipporthash { -+ ip_set_ip_t *members; /* the ipporthash proper */ -+ uint32_t elements; /* number of elements */ -+ uint32_t hashsize; /* hash size */ -+ uint16_t probes; /* max number of probes */ -+ uint16_t resize; /* resize factor in percent */ -+ ip_set_ip_t first_ip; /* host byte order, included in range */ -+ ip_set_ip_t last_ip; /* host byte order, included in range */ -+ void *initval[0]; /* initvals for jhash_1word */ -+}; -+ -+struct ip_set_req_ipporthash_create { -+ uint32_t hashsize; -+ uint16_t probes; -+ uint16_t resize; -+ ip_set_ip_t from; -+ ip_set_ip_t to; -+}; -+ -+struct ip_set_req_ipporthash { -+ ip_set_ip_t ip; -+ ip_set_ip_t port; -+}; -+ -+#endif /* __IP_SET_IPPORTHASH_H */ diff --git a/target/linux/generic-2.4/patches/622-tc_esfq.patch b/target/linux/generic-2.4/patches/622-tc_esfq.patch new file mode 100644 index 0000000000..2a05f03456 --- /dev/null +++ b/target/linux/generic-2.4/patches/622-tc_esfq.patch @@ -0,0 +1,748 @@ +Index: linux-2.4.35.4/Documentation/Configure.help +=================================================================== +--- linux-2.4.35.4.orig/Documentation/Configure.help ++++ linux-2.4.35.4/Documentation/Configure.help +@@ -11153,6 +11153,24 @@ CONFIG_NET_SCH_HFSC + whenever you want). If you want to compile it as a module, say M + here and read . + ++ESFQ queue ++CONFIG_NET_SCH_ESFQ ++ Say Y here if you want to use the Stochastic Fairness Queueing (SFQ) ++ packet scheduling algorithm for some of your network devices or as a ++ leaf discipline for the CBQ scheduling algorithm (see the top of ++ for details and references about the SFQ ++ algorithm). ++ ++ This is an enchanced SFQ version which allows you to control the ++ hardcoded values in the SFQ scheduler: queue depth, hash table size, ++ queues limit. Also adds control to the hash function used to identify ++ packet flows. Hash by src or dst ip and original sfq hash. ++ ++ This code is also available as a module called sch_esfq.o ( = code ++ which can be inserted in and removed from the running kernel ++ whenever you want). If you want to compile it as a module, say M ++ here and read . ++ + CSZ packet scheduler + CONFIG_NET_SCH_CSZ + Say Y here if you want to use the Clark-Shenker-Zhang (CSZ) packet +Index: linux-2.4.35.4/include/linux/pkt_sched.h +=================================================================== +--- linux-2.4.35.4.orig/include/linux/pkt_sched.h ++++ linux-2.4.35.4/include/linux/pkt_sched.h +@@ -173,8 +173,36 @@ struct tc_sfq_qopt + * + * The only reason for this is efficiency, it is possible + * to change these parameters in compile time. ++ * ++ * If you need to play with these values use esfq instead. 
+ */ + ++/* ESFQ section */ ++ ++enum ++{ ++ /* traditional */ ++ TCA_SFQ_HASH_CLASSIC, ++ TCA_SFQ_HASH_DST, ++ TCA_SFQ_HASH_SRC, ++ /* conntrack */ ++ TCA_SFQ_HASH_CTORIGDST, ++ TCA_SFQ_HASH_CTORIGSRC, ++ TCA_SFQ_HASH_CTREPLDST, ++ TCA_SFQ_HASH_CTREPLSRC, ++ TCA_SFQ_HASH_CTNATCHG, ++}; ++ ++struct tc_esfq_qopt ++{ ++ unsigned quantum; /* Bytes per round allocated to flow */ ++ int perturb_period; /* Period of hash perturbation */ ++ __u32 limit; /* Maximal packets in queue */ ++ unsigned divisor; /* Hash divisor */ ++ unsigned flows; /* Maximal number of flows */ ++ unsigned hash_kind; /* Hash function to use for flow identification */ ++}; ++ + /* RED section */ + + enum +Index: linux-2.4.35.4/net/sched/Config.in +=================================================================== +--- linux-2.4.35.4.orig/net/sched/Config.in ++++ linux-2.4.35.4/net/sched/Config.in +@@ -12,6 +12,7 @@ fi + tristate ' The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO + tristate ' RED queue' CONFIG_NET_SCH_RED + tristate ' SFQ queue' CONFIG_NET_SCH_SFQ ++tristate ' ESFQ queue' CONFIG_NET_SCH_ESFQ + tristate ' TEQL queue' CONFIG_NET_SCH_TEQL + tristate ' TBF queue' CONFIG_NET_SCH_TBF + tristate ' GRED queue' CONFIG_NET_SCH_GRED +Index: linux-2.4.35.4/net/sched/Makefile +=================================================================== +--- linux-2.4.35.4.orig/net/sched/Makefile ++++ linux-2.4.35.4/net/sched/Makefile +@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o + obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o + obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o + obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o ++obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o + obj-$(CONFIG_NET_SCH_RED) += sch_red.o + obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o + obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o +Index: linux-2.4.35.4/net/sched/sch_esfq.c +=================================================================== +--- /dev/null ++++ linux-2.4.35.4/net/sched/sch_esfq.c +@@ -0,0 +1,649 @@ ++/* ++ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ * ++ * Authors: Alexey Kuznetsov, ++ * ++ * Changes: Alexander Atanasov, ++ * Added dynamic depth,limit,divisor,hash_kind options. ++ * Added dst and src hashes. ++ * ++ * Alexander Clouter, ++ * Ported ESFQ to Linux 2.6. ++ * ++ * Corey Hickey, ++ * Maintenance of the Linux 2.6 port. ++ * Added fwmark hash (thanks to Robert Kurjata). ++ * Added usage of jhash. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define IPPROTO_DCCP 33 ++#define qdisc_priv(q) ((void *)(q->data)) ++ ++#ifdef CONFIG_IP_NF_CONNTRACK ++/* #include */ ++#include ++#endif ++ ++/* Stochastic Fairness Queuing algorithm. ++ For more comments look at sch_sfq.c. ++ The difference is that you can change limit, depth, ++ hash table size and choose alternate hash types. 
++ ++ classic: same as in sch_sfq.c ++ dst: destination IP address ++ src: source IP address ++ ctorigdst: original destination IP address ++ ctorigsrc: original source IP address ++ ctrepldst: reply destination IP address ++ ctreplsrc: reply source IP ++ ctnatchg: use the address which changed via nat ++ ++*/ ++ ++ ++/* This type should contain at least SFQ_DEPTH*2 values */ ++typedef unsigned int esfq_index; ++ ++struct esfq_head ++{ ++ esfq_index next; ++ esfq_index prev; ++}; ++ ++struct esfq_sched_data ++{ ++/* Parameters */ ++ int perturb_period; ++ unsigned quantum; /* Allotment per round: MUST BE >= MTU */ ++ int limit; ++ unsigned depth; ++ unsigned hash_divisor; ++ unsigned hash_kind; ++/* Variables */ ++ struct timer_list perturb_timer; ++ int perturbation; ++ esfq_index tail; /* Index of current slot in round */ ++ esfq_index max_depth; /* Maximal depth */ ++ ++ esfq_index *ht; /* Hash table */ ++ esfq_index *next; /* Active slots link */ ++ short *allot; /* Current allotment per slot */ ++ unsigned short *hash; /* Hash value indexed by slots */ ++ struct sk_buff_head *qs; /* Slot queue */ ++ struct esfq_head *dep; /* Linked list of slots, indexed by depth */ ++ unsigned dyn_min; /* For dynamic divisor adjustment; minimum value seen */ ++ unsigned dyn_max; /* maximum value seen */ ++ unsigned dyn_range; /* saved range */ ++}; ++ ++/* This contains the info we will hash. */ ++struct esfq_packet_info ++{ ++ u32 proto; /* protocol or port */ ++ u32 src; /* source from packet header */ ++ u32 dst; /* destination from packet header */ ++ u32 ctorigsrc; /* original source from conntrack */ ++ u32 ctorigdst; /* original destination from conntrack */ ++ u32 ctreplsrc; /* reply source from conntrack */ ++ u32 ctrepldst; /* reply destination from conntrack */ ++}; ++ ++static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a) ++{ ++ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1); ++} ++ ++static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b) ++{ ++ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1); ++} ++ ++static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c) ++{ ++ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1); ++} ++ ++ ++static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb) ++{ ++ struct esfq_packet_info info; ++#ifdef CONFIG_IP_NF_CONNTRACK ++ enum ip_conntrack_info ctinfo; ++ struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo); ++#endif ++ ++ switch (skb->protocol) { ++ case __constant_htons(ETH_P_IP): ++ { ++ struct iphdr *iph = skb->nh.iph; ++ info.dst = iph->daddr; ++ info.src = iph->saddr; ++ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && ++ (iph->protocol == IPPROTO_TCP || ++ iph->protocol == IPPROTO_UDP || ++ iph->protocol == IPPROTO_SCTP || ++ iph->protocol == IPPROTO_DCCP || ++ iph->protocol == IPPROTO_ESP)) ++ info.proto = *(((u32*)iph) + iph->ihl); ++ else ++ info.proto = iph->protocol; ++ break; ++ } ++ default: ++ info.dst = (u32)(unsigned long)skb->dst; ++ info.src = (u32)(unsigned long)skb->sk; ++ info.proto = skb->protocol; ++ } ++ ++#ifdef CONFIG_IP_NF_CONNTRACK ++ /* defaults if there is no conntrack info */ ++ info.ctorigsrc = info.src; ++ info.ctorigdst = info.dst; ++ info.ctreplsrc = info.dst; ++ info.ctrepldst = info.src; ++ /* collect conntrack info */ ++ IP_NF_ASSERT(ct); ++ if (ct) { ++ if (skb->protocol == __constant_htons(ETH_P_IP)) { ++ info.ctorigsrc = 
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; ++ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip; ++ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip; ++ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip; ++ } ++ } ++#endif ++ ++ switch(q->hash_kind) ++ { ++ case TCA_SFQ_HASH_CLASSIC: ++ return esfq_jhash_3words(q, info.dst, info.src, info.proto); ++ case TCA_SFQ_HASH_DST: ++ return esfq_jhash_1word(q, info.dst); ++ case TCA_SFQ_HASH_SRC: ++ return esfq_jhash_1word(q, info.src); ++#ifdef CONFIG_IP_NF_CONNTRACK ++ case TCA_SFQ_HASH_CTORIGDST: ++ return esfq_jhash_1word(q, info.ctorigdst); ++ case TCA_SFQ_HASH_CTORIGSRC: ++ return esfq_jhash_1word(q, info.ctorigsrc); ++ case TCA_SFQ_HASH_CTREPLDST: ++ return esfq_jhash_1word(q, info.ctrepldst); ++ case TCA_SFQ_HASH_CTREPLSRC: ++ return esfq_jhash_1word(q, info.ctreplsrc); ++ case TCA_SFQ_HASH_CTNATCHG: ++ { ++ if (info.ctorigdst == info.ctreplsrc) ++ return esfq_jhash_1word(q, info.ctorigsrc); ++ else ++ return esfq_jhash_1word(q, info.ctreplsrc); ++ } ++#endif ++ default: ++ if (net_ratelimit()) ++ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n"); ++ } ++ return esfq_jhash_3words(q, info.dst, info.src, info.proto); ++} ++ ++static inline void esfq_link(struct esfq_sched_data *q, esfq_index x) ++{ ++ esfq_index p, n; ++ int d = q->qs[x].qlen + q->depth; ++ ++ p = d; ++ n = q->dep[d].next; ++ q->dep[x].next = n; ++ q->dep[x].prev = p; ++ q->dep[p].next = q->dep[n].prev = x; ++} ++ ++static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x) ++{ ++ esfq_index p, n; ++ ++ n = q->dep[x].next; ++ p = q->dep[x].prev; ++ q->dep[p].next = n; ++ q->dep[n].prev = p; ++ ++ if (n == p && q->max_depth == q->qs[x].qlen + 1) ++ q->max_depth--; ++ ++ esfq_link(q, x); ++} ++ ++static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x) ++{ ++ esfq_index p, n; ++ int d; ++ ++ n = q->dep[x].next; ++ p = q->dep[x].prev; ++ q->dep[p].next = n; ++ q->dep[n].prev = p; ++ d = q->qs[x].qlen; ++ if (q->max_depth < d) ++ q->max_depth = d; ++ ++ esfq_link(q, x); ++} ++ ++static unsigned int esfq_drop(struct Qdisc *sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ esfq_index d = q->max_depth; ++ struct sk_buff *skb; ++ unsigned int len; ++ ++ /* Queue is full! Find the longest slot and ++ drop a packet from it */ ++ ++ if (d > 1) { ++ esfq_index x = q->dep[d+q->depth].next; ++ skb = q->qs[x].prev; ++ len = skb->len; ++ __skb_unlink(skb, &q->qs[x]); ++ kfree_skb(skb); ++ esfq_dec(q, x); ++ sch->q.qlen--; ++ sch->stats.drops++; ++ sch->stats.backlog -= len; ++ return len; ++ } ++ ++ if (d == 1) { ++ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. 
*/ ++ d = q->next[q->tail]; ++ q->next[q->tail] = q->next[d]; ++ q->allot[q->next[d]] += q->quantum; ++ skb = q->qs[d].prev; ++ len = skb->len; ++ __skb_unlink(skb, &q->qs[d]); ++ kfree_skb(skb); ++ esfq_dec(q, d); ++ sch->q.qlen--; ++ q->ht[q->hash[d]] = q->depth; ++ sch->stats.drops++; ++ sch->stats.backlog -= len; ++ return len; ++ } ++ ++ return 0; ++} ++ ++static int ++esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ unsigned hash = esfq_hash(q, skb); ++ unsigned depth = q->depth; ++ esfq_index x; ++ ++ x = q->ht[hash]; ++ if (x == depth) { ++ q->ht[hash] = x = q->dep[depth].next; ++ q->hash[x] = hash; ++ } ++ sch->stats.backlog += skb->len; ++ __skb_queue_tail(&q->qs[x], skb); ++ esfq_inc(q, x); ++ if (q->qs[x].qlen == 1) { /* The flow is new */ ++ if (q->tail == depth) { /* It is the first flow */ ++ q->tail = x; ++ q->next[x] = x; ++ q->allot[x] = q->quantum; ++ } else { ++ q->next[x] = q->next[q->tail]; ++ q->next[q->tail] = x; ++ q->tail = x; ++ } ++ } ++ if (++sch->q.qlen < q->limit-1) { ++ sch->stats.bytes += skb->len; ++ sch->stats.packets++; ++ return 0; ++ } ++ ++ esfq_drop(sch); ++ return NET_XMIT_CN; ++} ++ ++static int ++esfq_requeue(struct sk_buff *skb, struct Qdisc* sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ unsigned hash = esfq_hash(q, skb); ++ unsigned depth = q->depth; ++ esfq_index x; ++ ++ x = q->ht[hash]; ++ if (x == depth) { ++ q->ht[hash] = x = q->dep[depth].next; ++ q->hash[x] = hash; ++ } ++ sch->stats.backlog += skb->len; ++ __skb_queue_head(&q->qs[x], skb); ++ esfq_inc(q, x); ++ if (q->qs[x].qlen == 1) { /* The flow is new */ ++ if (q->tail == depth) { /* It is the first flow */ ++ q->tail = x; ++ q->next[x] = x; ++ q->allot[x] = q->quantum; ++ } else { ++ q->next[x] = q->next[q->tail]; ++ q->next[q->tail] = x; ++ q->tail = x; ++ } ++ } ++ if (++sch->q.qlen < q->limit - 1) { ++ return 0; ++ } ++ ++ sch->stats.drops++; ++ esfq_drop(sch); ++ return NET_XMIT_CN; ++} ++ ++ ++ ++ ++static struct sk_buff * ++esfq_dequeue(struct Qdisc* sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ struct sk_buff *skb; ++ unsigned depth = q->depth; ++ esfq_index a, old_a; ++ ++ /* No active slots */ ++ if (q->tail == depth) ++ return NULL; ++ ++ a = old_a = q->next[q->tail]; ++ ++ /* Grab packet */ ++ skb = __skb_dequeue(&q->qs[a]); ++ esfq_dec(q, a); ++ sch->q.qlen--; ++ sch->stats.backlog -= skb->len; ++ ++ /* Is the slot empty? 
*/ ++ if (q->qs[a].qlen == 0) { ++ q->ht[q->hash[a]] = depth; ++ a = q->next[a]; ++ if (a == old_a) { ++ q->tail = depth; ++ return skb; ++ } ++ q->next[q->tail] = a; ++ q->allot[a] += q->quantum; ++ } else if ((q->allot[a] -= skb->len) <= 0) { ++ q->tail = a; ++ a = q->next[a]; ++ q->allot[a] += q->quantum; ++ } ++ ++ return skb; ++} ++ ++static void ++esfq_reset(struct Qdisc* sch) ++{ ++ struct sk_buff *skb; ++ ++ while ((skb = esfq_dequeue(sch)) != NULL) ++ kfree_skb(skb); ++} ++ ++static void esfq_perturbation(unsigned long arg) ++{ ++ struct Qdisc *sch = (struct Qdisc*)arg; ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ ++ q->perturbation = net_random()&0x1F; ++ ++ if (q->perturb_period) { ++ q->perturb_timer.expires = jiffies + q->perturb_period; ++ add_timer(&q->perturb_timer); ++ } ++} ++ ++static int esfq_change(struct Qdisc *sch, struct rtattr *opt) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ struct tc_esfq_qopt *ctl = RTA_DATA(opt); ++ int old_perturb = q->perturb_period; ++ ++ if (opt->rta_len < RTA_LENGTH(sizeof(*ctl))) ++ return -EINVAL; ++ ++ sch_tree_lock(sch); ++ q->quantum = ctl->quantum ? : psched_mtu(sch->dev); ++ q->perturb_period = ctl->perturb_period*HZ; ++// q->hash_divisor = ctl->divisor; ++// q->tail = q->limit = q->depth = ctl->flows; ++ ++ if (ctl->limit) ++ q->limit = min_t(u32, ctl->limit, q->depth); ++ ++ if (ctl->hash_kind) { ++ q->hash_kind = ctl->hash_kind; ++ if (q->hash_kind != TCA_SFQ_HASH_CLASSIC) ++ q->perturb_period = 0; ++ } ++ ++ // is sch_tree_lock enough to do this ? ++ while (sch->q.qlen >= q->limit-1) ++ esfq_drop(sch); ++ ++ if (old_perturb) ++ del_timer(&q->perturb_timer); ++ if (q->perturb_period) { ++ q->perturb_timer.expires = jiffies + q->perturb_period; ++ add_timer(&q->perturb_timer); ++ } else { ++ q->perturbation = 0; ++ } ++ sch_tree_unlock(sch); ++ return 0; ++} ++ ++static int esfq_init(struct Qdisc *sch, struct rtattr *opt) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ struct tc_esfq_qopt *ctl; ++ esfq_index p = ~0U/2; ++ int i; ++ ++ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl))) ++ return -EINVAL; ++ ++ init_timer(&q->perturb_timer); ++ q->perturb_timer.data = (unsigned long)sch; ++ q->perturb_timer.function = esfq_perturbation; ++ q->perturbation = 0; ++ q->hash_kind = TCA_SFQ_HASH_CLASSIC; ++ q->max_depth = 0; ++ q->dyn_min = ~0U; /* maximum value for this type */ ++ q->dyn_max = 0; /* dyn_min/dyn_max will be set properly upon first packet */ ++ if (opt == NULL) { ++ q->quantum = psched_mtu(sch->dev); ++ q->perturb_period = 0; ++ q->hash_divisor = 1024; ++ q->tail = q->limit = q->depth = 128; ++ ++ } else { ++ ctl = RTA_DATA(opt); ++ q->quantum = ctl->quantum ? : psched_mtu(sch->dev); ++ q->perturb_period = ctl->perturb_period*HZ; ++ q->hash_divisor = ctl->divisor ? : 1024; ++ q->tail = q->limit = q->depth = ctl->flows ? 
: 128; ++ ++ if ( q->depth > p - 1 ) ++ return -EINVAL; ++ ++ if (ctl->limit) ++ q->limit = min_t(u32, ctl->limit, q->depth); ++ ++ if (ctl->hash_kind) { ++ q->hash_kind = ctl->hash_kind; ++ } ++ ++ if (q->perturb_period) { ++ q->perturb_timer.expires = jiffies + q->perturb_period; ++ add_timer(&q->perturb_timer); ++ } ++ } ++ ++ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL); ++ if (!q->ht) ++ goto err_case; ++ ++ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL); ++ if (!q->dep) ++ goto err_case; ++ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL); ++ if (!q->next) ++ goto err_case; ++ ++ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL); ++ if (!q->allot) ++ goto err_case; ++ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL); ++ if (!q->hash) ++ goto err_case; ++ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL); ++ if (!q->qs) ++ goto err_case; ++ ++ for (i=0; i< q->hash_divisor; i++) ++ q->ht[i] = q->depth; ++ for (i=0; idepth; i++) { ++ skb_queue_head_init(&q->qs[i]); ++ q->dep[i+q->depth].next = i+q->depth; ++ q->dep[i+q->depth].prev = i+q->depth; ++ } ++ ++ for (i=0; idepth; i++) ++ esfq_link(q, i); ++ return 0; ++err_case: ++ del_timer(&q->perturb_timer); ++ if (q->ht) ++ kfree(q->ht); ++ if (q->dep) ++ kfree(q->dep); ++ if (q->next) ++ kfree(q->next); ++ if (q->allot) ++ kfree(q->allot); ++ if (q->hash) ++ kfree(q->hash); ++ if (q->qs) ++ kfree(q->qs); ++ return -ENOBUFS; ++} ++ ++static void esfq_destroy(struct Qdisc *sch) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ del_timer(&q->perturb_timer); ++ if(q->ht) ++ kfree(q->ht); ++ if(q->dep) ++ kfree(q->dep); ++ if(q->next) ++ kfree(q->next); ++ if(q->allot) ++ kfree(q->allot); ++ if(q->hash) ++ kfree(q->hash); ++ if(q->qs) ++ kfree(q->qs); ++} ++ ++static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb) ++{ ++ struct esfq_sched_data *q = qdisc_priv(sch); ++ unsigned char *b = skb->tail; ++ struct tc_esfq_qopt opt; ++ ++ opt.quantum = q->quantum; ++ opt.perturb_period = q->perturb_period/HZ; ++ ++ opt.limit = q->limit; ++ opt.divisor = q->hash_divisor; ++ opt.flows = q->depth; ++ opt.hash_kind = q->hash_kind; ++ ++ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); ++ ++ return skb->len; ++ ++rtattr_failure: ++ skb_trim(skb, b - skb->data); ++ return -1; ++} ++ ++static struct Qdisc_ops esfq_qdisc_ops = ++{ ++ .next = NULL, ++ .cl_ops = NULL, ++ .id = "esfq", ++ .priv_size = sizeof(struct esfq_sched_data), ++ .enqueue = esfq_enqueue, ++ .dequeue = esfq_dequeue, ++ .requeue = esfq_requeue, ++ .drop = esfq_drop, ++ .init = esfq_init, ++ .reset = esfq_reset, ++ .destroy = esfq_destroy, ++ .change = NULL, /* esfq_change - needs more work */ ++ .dump = esfq_dump, ++}; ++ ++static int __init esfq_module_init(void) ++{ ++ return register_qdisc(&esfq_qdisc_ops); ++} ++static void __exit esfq_module_exit(void) ++{ ++ unregister_qdisc(&esfq_qdisc_ops); ++} ++module_init(esfq_module_init) ++module_exit(esfq_module_exit) ++MODULE_LICENSE("GPL"); diff --git a/target/linux/generic-2.4/patches/623-netfilter_ip6t_reject.patch b/target/linux/generic-2.4/patches/623-netfilter_ip6t_reject.patch deleted file mode 100644 index 2caae3ab01..0000000000 --- a/target/linux/generic-2.4/patches/623-netfilter_ip6t_reject.patch +++ /dev/null @@ -1,360 +0,0 @@ -Index: linux-2.4.35.4/net/ipv6/netfilter/ip6t_REJECT.c -=================================================================== ---- /dev/null -+++ linux-2.4.35.4/net/ipv6/netfilter/ip6t_REJECT.c -@@ -0,0 +1,301 @@ 
-+/* -+ * This is a module which is used for rejecting packets. -+ * Added support for customized reject packets (Jozsef Kadlecsik). -+ * Sun 12 Nov 2000 -+ * Port to IPv6 / ip6tables (Harald Welte ) -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#if 1 -+#define DEBUGP printk -+#else -+#define DEBUGP(format, args...) -+#endif -+ -+#if 0 -+/* Send RST reply */ -+static void send_reset(struct sk_buff *oldskb) -+{ -+ struct sk_buff *nskb; -+ struct tcphdr *otcph, *tcph; -+ struct rtable *rt; -+ unsigned int otcplen; -+ int needs_ack; -+ -+ /* IP header checks: fragment, too short. */ -+ if (oldskb->nh.iph->frag_off & htons(IP_OFFSET) -+ || oldskb->len < (oldskb->nh.iph->ihl<<2) + sizeof(struct tcphdr)) -+ return; -+ -+ otcph = (struct tcphdr *)((u_int32_t*)oldskb->nh.iph + oldskb->nh.iph->ihl); -+ otcplen = oldskb->len - oldskb->nh.iph->ihl*4; -+ -+ /* No RST for RST. */ -+ if (otcph->rst) -+ return; -+ -+ /* Check checksum. */ -+ if (tcp_v4_check(otcph, otcplen, oldskb->nh.iph->saddr, -+ oldskb->nh.iph->daddr, -+ csum_partial((char *)otcph, otcplen, 0)) != 0) -+ return; -+ -+ /* Copy skb (even if skb is about to be dropped, we can't just -+ clone it because there may be other things, such as tcpdump, -+ interested in it) */ -+ nskb = skb_copy(oldskb, GFP_ATOMIC); -+ if (!nskb) -+ return; -+ -+ /* This packet will not be the same as the other: clear nf fields */ -+ nf_conntrack_put(nskb->nfct); -+ nskb->nfct = NULL; -+ nskb->nfcache = 0; -+#ifdef CONFIG_NETFILTER_DEBUG -+ nskb->nf_debug = 0; -+#endif -+ -+ tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl); -+ -+ nskb->nh.iph->daddr = xchg(&nskb->nh.iph->saddr, nskb->nh.iph->daddr); -+ tcph->source = xchg(&tcph->dest, tcph->source); -+ -+ /* Truncate to length (no data) */ -+ tcph->doff = sizeof(struct tcphdr)/4; -+ skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr)); -+ nskb->nh.iph->tot_len = htons(nskb->len); -+ -+ if (tcph->ack) { -+ needs_ack = 0; -+ tcph->seq = otcph->ack_seq; -+ tcph->ack_seq = 0; -+ } else { -+ needs_ack = 1; -+ tcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn + otcph->fin -+ + otcplen - (otcph->doff<<2)); -+ tcph->seq = 0; -+ } -+ -+ /* Reset flags */ -+ ((u_int8_t *)tcph)[13] = 0; -+ tcph->rst = 1; -+ tcph->ack = needs_ack; -+ -+ tcph->window = 0; -+ tcph->urg_ptr = 0; -+ -+ /* Adjust TCP checksum */ -+ tcph->check = 0; -+ tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr), -+ nskb->nh.iph->saddr, -+ nskb->nh.iph->daddr, -+ csum_partial((char *)tcph, -+ sizeof(struct tcphdr), 0)); -+ -+ /* Adjust IP TTL, DF */ -+ nskb->nh.iph->ttl = MAXTTL; -+ /* Set DF, id = 0 */ -+ nskb->nh.iph->frag_off = htons(IP_DF); -+ nskb->nh.iph->id = 0; -+ -+ /* Adjust IP checksum */ -+ nskb->nh.iph->check = 0; -+ nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, -+ nskb->nh.iph->ihl); -+ -+ /* Routing */ -+ if (ip_route_output(&rt, nskb->nh.iph->daddr, nskb->nh.iph->saddr, -+ RT_TOS(nskb->nh.iph->tos) | RTO_CONN, -+ 0) != 0) -+ goto free_nskb; -+ -+ dst_release(nskb->dst); -+ nskb->dst = &rt->u.dst; -+ -+ /* "Never happens" */ -+ if (nskb->len > nskb->dst->pmtu) -+ goto free_nskb; -+ -+ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev, -+ ip_finish_output); -+ return; -+ -+ free_nskb: -+ kfree_skb(nskb); -+} -+#endif -+ -+static unsigned int reject6_target(struct sk_buff **pskb, -+ unsigned int hooknum, -+ const struct net_device *in, -+ const struct net_device *out, -+ const void *targinfo, -+ void 
*userinfo) -+{ -+ const struct ip6t_reject_info *reject = targinfo; -+ struct sk_buff *skb2 = NULL; -+ struct rt6_info *rt6i; -+ struct net_device odev; -+ -+ if (!out) { -+ skb2 = skb_clone(*pskb, GFP_ATOMIC); -+ if (skb2 == NULL) { -+ return NF_DROP; -+ } -+ dst_release(skb2->dst); -+ skb2->dst = NULL; -+ -+ rt6i = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0); -+ if (rt6i && rt6i->rt6i_dev) { -+ skb2->dev = rt6i->rt6i_dev; -+ rt6i = rt6_lookup(&skb2->nh.ipv6h->daddr, &skb2->nh.ipv6h->saddr, 0, 0); -+ } -+ memcpy(&odev, skb2->dev, sizeof(odev)); /* XXX 'out' has 'const' qualifier... */ -+ } else { -+ memcpy(&odev, out, sizeof(odev)); -+ } -+ -+ printk(KERN_DEBUG "%s: medium point\n", __FUNCTION__); -+ /* WARNING: This code causes reentry within ip6tables. -+ This means that the ip6tables jump stack is now crap. We -+ must return an absolute verdict. --RR */ -+ DEBUGP("REJECTv6: calling icmpv6_send\n"); -+ switch (reject->with) { -+ case IP6T_ICMP6_NO_ROUTE: -+ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, &odev); -+ break; -+ case IP6T_ICMP6_ADM_PROHIBITED: -+ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADM_PROHIBITED, 0, &odev); -+ break; -+ case IP6T_ICMP6_NOT_NEIGHBOUR: -+ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0, &odev); -+ break; -+ case IP6T_ICMP6_ADDR_UNREACH: -+ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, &odev); -+ break; -+ case IP6T_ICMP6_PORT_UNREACH: -+ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, &odev); -+ break; -+#if 0 -+ case IPT_ICMP_ECHOREPLY: { -+ struct icmp6hdr *icmph = (struct icmphdr *) -+ ((u_int32_t *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl); -+ unsigned int datalen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4; -+ -+ /* Not non-head frags, or truncated */ -+ if (((ntohs((*pskb)->nh.iph->frag_off) & IP_OFFSET) == 0) -+ && datalen >= 4) { -+ /* Usually I don't like cut & pasting code, -+ but dammit, my party is starting in 45 -+ mins! --RR */ -+ struct icmp_bxm icmp_param; -+ -+ icmp_param.icmph=*icmph; -+ icmp_param.icmph.type=ICMP_ECHOREPLY; -+ icmp_param.data_ptr=(icmph+1); -+ icmp_param.data_len=datalen; -+ icmp_reply(&icmp_param, *pskb); -+ } -+ } -+ break; -+ case IPT_TCP_RESET: -+ send_reset(*pskb); -+ break; -+#endif -+ default: -+ printk(KERN_WARNING "REJECTv6: case %u not handled yet\n", reject->with); -+ break; -+ } -+ -+ if (skb2) kfree_skb(skb2); -+ -+ return NF_DROP; -+} -+ -+static inline int find_ping_match(const struct ip6t_entry_match *m) -+{ -+ const struct ip6t_icmp *icmpinfo = (const struct ip6t_icmp *)m->data; -+ -+ if (strcmp(m->u.kernel.match->name, "icmp6") == 0 -+ && icmpinfo->type == ICMPV6_ECHO_REQUEST -+ && !(icmpinfo->invflags & IP6T_ICMP_INV)) -+ return 1; -+ -+ return 0; -+} -+ -+static int check(const char *tablename, -+ const struct ip6t_entry *e, -+ void *targinfo, -+ unsigned int targinfosize, -+ unsigned int hook_mask) -+{ -+ const struct ip6t_reject_info *rejinfo = targinfo; -+ -+ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) { -+ DEBUGP("REJECTv6: targinfosize %u != 0\n", targinfosize); -+ return 0; -+ } -+ -+ /* Only allow these for packet filtering. 
*/ -+ if (strcmp(tablename, "filter") != 0) { -+ DEBUGP("REJECTv6: bad table `%s'.\n", tablename); -+ return 0; -+ } -+ if ((hook_mask & ~((1 << NF_IP6_LOCAL_IN) -+ | (1 << NF_IP6_FORWARD) -+ | (1 << NF_IP6_LOCAL_OUT))) != 0) { -+ DEBUGP("REJECTv6: bad hook mask %X\n", hook_mask); -+ return 0; -+ } -+ -+ if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) { -+ /* Must specify that it's an ICMP ping packet. */ -+ if (e->ipv6.proto != IPPROTO_ICMPV6 -+ || (e->ipv6.invflags & IP6T_INV_PROTO)) { -+ DEBUGP("REJECTv6: ECHOREPLY illegal for non-icmp\n"); -+ return 0; -+ } -+ /* Must contain ICMP match. */ -+ if (IP6T_MATCH_ITERATE(e, find_ping_match) == 0) { -+ DEBUGP("REJECTv6: ECHOREPLY illegal for non-ping\n"); -+ return 0; -+ } -+ } else if (rejinfo->with == IP6T_TCP_RESET) { -+ /* Must specify that it's a TCP packet */ -+ if (e->ipv6.proto != IPPROTO_TCP -+ || (e->ipv6.invflags & IP6T_INV_PROTO)) { -+ DEBUGP("REJECTv6: TCP_RESET illegal for non-tcp\n"); -+ return 0; -+ } -+ } -+ -+ return 1; -+} -+ -+static struct ip6t_target ip6t_reject_reg -+= { { NULL, NULL }, "REJECT", reject6_target, check, NULL, THIS_MODULE }; -+ -+static int __init init(void) -+{ -+ if (ip6t_register_target(&ip6t_reject_reg)) -+ return -EINVAL; -+ return 0; -+} -+ -+static void __exit fini(void) -+{ -+ ip6t_unregister_target(&ip6t_reject_reg); -+} -+ -+module_init(init); -+module_exit(fini); -Index: linux-2.4.35.4/include/linux/netfilter_ipv6/ip6t_REJECT.h -=================================================================== ---- linux-2.4.35.4.orig/include/linux/netfilter_ipv6/ip6t_REJECT.h -+++ linux-2.4.35.4/include/linux/netfilter_ipv6/ip6t_REJECT.h -@@ -2,15 +2,17 @@ - #define _IP6T_REJECT_H - - enum ip6t_reject_with { -- IP6T_ICMP_NET_UNREACHABLE, -- IP6T_ICMP_HOST_UNREACHABLE, -- IP6T_ICMP_PROT_UNREACHABLE, -- IP6T_ICMP_PORT_UNREACHABLE, -- IP6T_ICMP_ECHOREPLY -+ IP6T_ICMP6_NO_ROUTE, -+ IP6T_ICMP6_ADM_PROHIBITED, -+ IP6T_ICMP6_NOT_NEIGHBOUR, -+ IP6T_ICMP6_ADDR_UNREACH, -+ IP6T_ICMP6_PORT_UNREACH, -+ IP6T_ICMP6_ECHOREPLY, -+ IP6T_TCP_RESET - }; - - struct ip6t_reject_info { - enum ip6t_reject_with with; /* reject type */ - }; - --#endif /*_IPT_REJECT_H*/ -+#endif /*_IP6T_REJECT_H*/ -Index: linux-2.4.35.4/net/ipv6/netfilter/Makefile -=================================================================== ---- linux-2.4.35.4.orig/net/ipv6/netfilter/Makefile -+++ linux-2.4.35.4/net/ipv6/netfilter/Makefile -@@ -34,5 +34,7 @@ obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue. 
- obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o - obj-$(CONFIG_IP6_NF_MATCH_RANDOM) += ip6t_random.o - obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o -+obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o -+ - - include $(TOPDIR)/Rules.make -Index: linux-2.4.35.4/net/ipv6/netfilter/Config.in -=================================================================== ---- linux-2.4.35.4.orig/net/ipv6/netfilter/Config.in -+++ linux-2.4.35.4/net/ipv6/netfilter/Config.in -@@ -61,6 +61,9 @@ if [ "$CONFIG_IP6_NF_IPTABLES" != "n" ]; - if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then - dep_tristate ' LOG target support' CONFIG_IP6_NF_TARGET_LOG $CONFIG_IP6_NF_FILTER - fi -+ if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then -+ dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER -+ fi - - # if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then - # dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER diff --git a/target/linux/generic-2.4/patches/623-netfilter_ipset_porthash.patch b/target/linux/generic-2.4/patches/623-netfilter_ipset_porthash.patch new file mode 100644 index 0000000000..0fc1ec6219 --- /dev/null +++ b/target/linux/generic-2.4/patches/623-netfilter_ipset_porthash.patch @@ -0,0 +1,39 @@ +Index: linux-2.4.35.4/include/linux/netfilter_ipv4/ip_set_ipporthash.h +=================================================================== +--- /dev/null ++++ linux-2.4.35.4/include/linux/netfilter_ipv4/ip_set_ipporthash.h +@@ -0,0 +1,34 @@ ++#ifndef __IP_SET_IPPORTHASH_H ++#define __IP_SET_IPPORTHASH_H ++ ++#include ++ ++#define SETTYPE_NAME "ipporthash" ++#define MAX_RANGE 0x0000FFFF ++#define INVALID_PORT (MAX_RANGE + 1) ++ ++struct ip_set_ipporthash { ++ ip_set_ip_t *members; /* the ipporthash proper */ ++ uint32_t elements; /* number of elements */ ++ uint32_t hashsize; /* hash size */ ++ uint16_t probes; /* max number of probes */ ++ uint16_t resize; /* resize factor in percent */ ++ ip_set_ip_t first_ip; /* host byte order, included in range */ ++ ip_set_ip_t last_ip; /* host byte order, included in range */ ++ void *initval[0]; /* initvals for jhash_1word */ ++}; ++ ++struct ip_set_req_ipporthash_create { ++ uint32_t hashsize; ++ uint16_t probes; ++ uint16_t resize; ++ ip_set_ip_t from; ++ ip_set_ip_t to; ++}; ++ ++struct ip_set_req_ipporthash { ++ ip_set_ip_t ip; ++ ip_set_ip_t port; ++}; ++ ++#endif /* __IP_SET_IPPORTHASH_H */ diff --git a/target/linux/generic-2.4/patches/624-netfilter_ip6t_reject.patch b/target/linux/generic-2.4/patches/624-netfilter_ip6t_reject.patch new file mode 100644 index 0000000000..2caae3ab01 --- /dev/null +++ b/target/linux/generic-2.4/patches/624-netfilter_ip6t_reject.patch @@ -0,0 +1,360 @@ +Index: linux-2.4.35.4/net/ipv6/netfilter/ip6t_REJECT.c +=================================================================== +--- /dev/null ++++ linux-2.4.35.4/net/ipv6/netfilter/ip6t_REJECT.c +@@ -0,0 +1,301 @@ ++/* ++ * This is a module which is used for rejecting packets. ++ * Added support for customized reject packets (Jozsef Kadlecsik). ++ * Sun 12 Nov 2000 ++ * Port to IPv6 / ip6tables (Harald Welte ) ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if 1 ++#define DEBUGP printk ++#else ++#define DEBUGP(format, args...) 
++#endif
++
++#if 0
++/* Send RST reply */
++static void send_reset(struct sk_buff *oldskb)
++{
++ struct sk_buff *nskb;
++ struct tcphdr *otcph, *tcph;
++ struct rtable *rt;
++ unsigned int otcplen;
++ int needs_ack;
++
++ /* IP header checks: fragment, too short. */
++ if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)
++ || oldskb->len < (oldskb->nh.iph->ihl<<2) + sizeof(struct tcphdr))
++ return;
++
++ otcph = (struct tcphdr *)((u_int32_t*)oldskb->nh.iph + oldskb->nh.iph->ihl);
++ otcplen = oldskb->len - oldskb->nh.iph->ihl*4;
++
++ /* No RST for RST. */
++ if (otcph->rst)
++ return;
++
++ /* Check checksum. */
++ if (tcp_v4_check(otcph, otcplen, oldskb->nh.iph->saddr,
++ oldskb->nh.iph->daddr,
++ csum_partial((char *)otcph, otcplen, 0)) != 0)
++ return;
++
++ /* Copy skb (even if skb is about to be dropped, we can't just
++ clone it because there may be other things, such as tcpdump,
++ interested in it) */
++ nskb = skb_copy(oldskb, GFP_ATOMIC);
++ if (!nskb)
++ return;
++
++ /* This packet will not be the same as the other: clear nf fields */
++ nf_conntrack_put(nskb->nfct);
++ nskb->nfct = NULL;
++ nskb->nfcache = 0;
++#ifdef CONFIG_NETFILTER_DEBUG
++ nskb->nf_debug = 0;
++#endif
++
++ tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);
++
++ nskb->nh.iph->daddr = xchg(&nskb->nh.iph->saddr, nskb->nh.iph->daddr);
++ tcph->source = xchg(&tcph->dest, tcph->source);
++
++ /* Truncate to length (no data) */
++ tcph->doff = sizeof(struct tcphdr)/4;
++ skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
++ nskb->nh.iph->tot_len = htons(nskb->len);
++
++ if (tcph->ack) {
++ needs_ack = 0;
++ tcph->seq = otcph->ack_seq;
++ tcph->ack_seq = 0;
++ } else {
++ needs_ack = 1;
++ tcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn + otcph->fin
++ + otcplen - (otcph->doff<<2));
++ tcph->seq = 0;
++ }
++
++ /* Reset flags */
++ ((u_int8_t *)tcph)[13] = 0;
++ tcph->rst = 1;
++ tcph->ack = needs_ack;
++
++ tcph->window = 0;
++ tcph->urg_ptr = 0;
++
++ /* Adjust TCP checksum */
++ tcph->check = 0;
++ tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
++ nskb->nh.iph->saddr,
++ nskb->nh.iph->daddr,
++ csum_partial((char *)tcph,
++ sizeof(struct tcphdr), 0));
++
++ /* Adjust IP TTL, DF */
++ nskb->nh.iph->ttl = MAXTTL;
++ /* Set DF, id = 0 */
++ nskb->nh.iph->frag_off = htons(IP_DF);
++ nskb->nh.iph->id = 0;
++
++ /* Adjust IP checksum */
++ nskb->nh.iph->check = 0;
++ nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph,
++ nskb->nh.iph->ihl);
++
++ /* Routing */
++ if (ip_route_output(&rt, nskb->nh.iph->daddr, nskb->nh.iph->saddr,
++ RT_TOS(nskb->nh.iph->tos) | RTO_CONN,
++ 0) != 0)
++ goto free_nskb;
++
++ dst_release(nskb->dst);
++ nskb->dst = &rt->u.dst;
++
++ /* "Never happens" */
++ if (nskb->len > nskb->dst->pmtu)
++ goto free_nskb;
++
++ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
++ ip_finish_output);
++ return;
++
++ free_nskb:
++ kfree_skb(nskb);
++}
++#endif
++
++static unsigned int reject6_target(struct sk_buff **pskb,
++ unsigned int hooknum,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *targinfo,
++ void *userinfo)
++{
++ const struct ip6t_reject_info *reject = targinfo;
++ struct sk_buff *skb2 = NULL;
++ struct rt6_info *rt6i;
++ struct net_device odev;
++
++ if (!out) {
++ skb2 = skb_clone(*pskb, GFP_ATOMIC);
++ if (skb2 == NULL) {
++ return NF_DROP;
++ }
++ dst_release(skb2->dst);
++ skb2->dst = NULL;
++
++ rt6i = rt6_lookup(&skb2->nh.ipv6h->saddr, NULL, 0, 0);
++ if (rt6i && rt6i->rt6i_dev) {
++ skb2->dev = rt6i->rt6i_dev;
++ rt6i = rt6_lookup(&skb2->nh.ipv6h->daddr, &skb2->nh.ipv6h->saddr, 0, 0);
++ }
++ memcpy(&odev, skb2->dev, sizeof(odev)); /* XXX 'out' has 'const' qualifier... */
++ } else {
++ memcpy(&odev, out, sizeof(odev));
++ }
++
++ printk(KERN_DEBUG "%s: medium point\n", __FUNCTION__);
++ /* WARNING: This code causes reentry within ip6tables.
++ This means that the ip6tables jump stack is now crap. We
++ must return an absolute verdict. --RR */
++ DEBUGP("REJECTv6: calling icmpv6_send\n");
++ switch (reject->with) {
++ case IP6T_ICMP6_NO_ROUTE:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, &odev);
++ break;
++ case IP6T_ICMP6_ADM_PROHIBITED:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADM_PROHIBITED, 0, &odev);
++ break;
++ case IP6T_ICMP6_NOT_NEIGHBOUR:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0, &odev);
++ break;
++ case IP6T_ICMP6_ADDR_UNREACH:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, &odev);
++ break;
++ case IP6T_ICMP6_PORT_UNREACH:
++ icmpv6_send(*pskb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, &odev);
++ break;
++#if 0
++ case IPT_ICMP_ECHOREPLY: {
++ struct icmp6hdr *icmph = (struct icmphdr *)
++ ((u_int32_t *)(*pskb)->nh.iph + (*pskb)->nh.iph->ihl);
++ unsigned int datalen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
++
++ /* Not non-head frags, or truncated */
++ if (((ntohs((*pskb)->nh.iph->frag_off) & IP_OFFSET) == 0)
++ && datalen >= 4) {
++ /* Usually I don't like cut & pasting code,
++ but dammit, my party is starting in 45
++ mins! --RR */
++ struct icmp_bxm icmp_param;
++
++ icmp_param.icmph=*icmph;
++ icmp_param.icmph.type=ICMP_ECHOREPLY;
++ icmp_param.data_ptr=(icmph+1);
++ icmp_param.data_len=datalen;
++ icmp_reply(&icmp_param, *pskb);
++ }
++ }
++ break;
++ case IPT_TCP_RESET:
++ send_reset(*pskb);
++ break;
++#endif
++ default:
++ printk(KERN_WARNING "REJECTv6: case %u not handled yet\n", reject->with);
++ break;
++ }
++
++ if (skb2) kfree_skb(skb2);
++
++ return NF_DROP;
++}
++
++static inline int find_ping_match(const struct ip6t_entry_match *m)
++{
++ const struct ip6t_icmp *icmpinfo = (const struct ip6t_icmp *)m->data;
++
++ if (strcmp(m->u.kernel.match->name, "icmp6") == 0
++ && icmpinfo->type == ICMPV6_ECHO_REQUEST
++ && !(icmpinfo->invflags & IP6T_ICMP_INV))
++ return 1;
++
++ return 0;
++}
++
++static int check(const char *tablename,
++ const struct ip6t_entry *e,
++ void *targinfo,
++ unsigned int targinfosize,
++ unsigned int hook_mask)
++{
++ const struct ip6t_reject_info *rejinfo = targinfo;
++
++ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) {
++ DEBUGP("REJECTv6: targinfosize %u != 0\n", targinfosize);
++ return 0;
++ }
++
++ /* Only allow these for packet filtering. */
++ if (strcmp(tablename, "filter") != 0) {
++ DEBUGP("REJECTv6: bad table `%s'.\n", tablename);
++ return 0;
++ }
++ if ((hook_mask & ~((1 << NF_IP6_LOCAL_IN)
++ | (1 << NF_IP6_FORWARD)
++ | (1 << NF_IP6_LOCAL_OUT))) != 0) {
++ DEBUGP("REJECTv6: bad hook mask %X\n", hook_mask);
++ return 0;
++ }
++
++ if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
++ /* Must specify that it's an ICMP ping packet. */
++ if (e->ipv6.proto != IPPROTO_ICMPV6
++ || (e->ipv6.invflags & IP6T_INV_PROTO)) {
++ DEBUGP("REJECTv6: ECHOREPLY illegal for non-icmp\n");
++ return 0;
++ }
++ /* Must contain ICMP match. */
++ if (IP6T_MATCH_ITERATE(e, find_ping_match) == 0) {
++ DEBUGP("REJECTv6: ECHOREPLY illegal for non-ping\n");
++ return 0;
++ }
++ } else if (rejinfo->with == IP6T_TCP_RESET) {
++ /* Must specify that it's a TCP packet */
++ if (e->ipv6.proto != IPPROTO_TCP
++ || (e->ipv6.invflags & IP6T_INV_PROTO)) {
++ DEBUGP("REJECTv6: TCP_RESET illegal for non-tcp\n");
++ return 0;
++ }
++ }
++
++ return 1;
++}
++
++static struct ip6t_target ip6t_reject_reg
++= { { NULL, NULL }, "REJECT", reject6_target, check, NULL, THIS_MODULE };
++
++static int __init init(void)
++{
++ if (ip6t_register_target(&ip6t_reject_reg))
++ return -EINVAL;
++ return 0;
++}
++
++static void __exit fini(void)
++{
++ ip6t_unregister_target(&ip6t_reject_reg);
++}
++
++module_init(init);
++module_exit(fini);
+Index: linux-2.4.35.4/include/linux/netfilter_ipv6/ip6t_REJECT.h
+===================================================================
+--- linux-2.4.35.4.orig/include/linux/netfilter_ipv6/ip6t_REJECT.h
++++ linux-2.4.35.4/include/linux/netfilter_ipv6/ip6t_REJECT.h
+@@ -2,15 +2,17 @@
+ #define _IP6T_REJECT_H
+
+ enum ip6t_reject_with {
+- IP6T_ICMP_NET_UNREACHABLE,
+- IP6T_ICMP_HOST_UNREACHABLE,
+- IP6T_ICMP_PROT_UNREACHABLE,
+- IP6T_ICMP_PORT_UNREACHABLE,
+- IP6T_ICMP_ECHOREPLY
++ IP6T_ICMP6_NO_ROUTE,
++ IP6T_ICMP6_ADM_PROHIBITED,
++ IP6T_ICMP6_NOT_NEIGHBOUR,
++ IP6T_ICMP6_ADDR_UNREACH,
++ IP6T_ICMP6_PORT_UNREACH,
++ IP6T_ICMP6_ECHOREPLY,
++ IP6T_TCP_RESET
+ };
+
+ struct ip6t_reject_info {
+ enum ip6t_reject_with with; /* reject type */
+ };
+
+-#endif /*_IPT_REJECT_H*/
++#endif /*_IP6T_REJECT_H*/
+Index: linux-2.4.35.4/net/ipv6/netfilter/Makefile
+===================================================================
+--- linux-2.4.35.4.orig/net/ipv6/netfilter/Makefile
++++ linux-2.4.35.4/net/ipv6/netfilter/Makefile
+@@ -34,5 +34,7 @@ obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.
+ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
+ obj-$(CONFIG_IP6_NF_MATCH_RANDOM) += ip6t_random.o
+ obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
++obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
++
+
+ include $(TOPDIR)/Rules.make
+Index: linux-2.4.35.4/net/ipv6/netfilter/Config.in
+===================================================================
+--- linux-2.4.35.4.orig/net/ipv6/netfilter/Config.in
++++ linux-2.4.35.4/net/ipv6/netfilter/Config.in
+@@ -61,6 +61,9 @@ if [ "$CONFIG_IP6_NF_IPTABLES" != "n" ];
+ if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then
+ dep_tristate ' LOG target support' CONFIG_IP6_NF_TARGET_LOG $CONFIG_IP6_NF_FILTER
+ fi
++ if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then
++ dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER
++ fi
+
+ # if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then
+ # dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER
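
With the renumbered 624 patch applied and CONFIG_IP6_NF_TARGET_REJECT enabled (y or m), the target is used from ip6tables in the usual way. The rule below is only a usage sketch: it assumes an ip6tables userspace built with the matching REJECT extension, and the interface and port values are placeholders.

  # Reject inbound IPv6 telnet with an ICMPv6 administratively-prohibited
  # error instead of a silent drop (the patch also provides tcp-reset):
  ip6tables -A INPUT -i eth0 -p tcp --dport 23 -j REJECT --reject-with icmp6-adm-prohibited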