$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_IPMAP, $(P_V4)ip_set_ipmap))
$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_IPPORTHASH, $(P_V4)ip_set_ipporthash))
$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_IPTREE, $(P_V4)ip_set_iptree))
+$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_IPTREEMAP, $(P_V4)ip_set_iptreemap))
$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_MACIPMAP, $(P_V4)ip_set_macipmap))
$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_NETHASH, $(P_V4)ip_set_nethash))
$(eval $(call nf_add,IPT_IPSET,CONFIG_IP_NF_SET_PORTMAP, $(P_V4)ip_set_portmap))
define KernelPackage/ipt-ipset/description
Netfilter kernel modules for ipset
+ Includes:
+ - ip_set
+ - ip_set_iphash
+ - ip_set_ipmap
+ - ip_set_ipporthash
+ - ip_set_iptree
+ - ip_set_iptreemap
+ - ip_set_macipmap
+ - ip_set_nethash
+ - ip_set_portmap
+ - ipt_set
+ - ipt_SET
endef
$(eval $(call KernelPackage,ipt-ipset))
CONFIG_IP_NF_SET_IPMAP=m
CONFIG_IP_NF_SET_IPPORTHASH=m
CONFIG_IP_NF_SET_IPTREE=m
+CONFIG_IP_NF_SET_IPTREEMAP=m
CONFIG_IP_NF_SET_MACIPMAP=m
CONFIG_IP_NF_SET_MAX=256
CONFIG_IP_NF_SET_NETHASH=m
CONFIG_IP_NF_SET_IPMAP=m
CONFIG_IP_NF_SET_IPPORTHASH=m
CONFIG_IP_NF_SET_IPTREE=m
+CONFIG_IP_NF_SET_IPTREEMAP=m
CONFIG_IP_NF_SET_MACIPMAP=m
CONFIG_IP_NF_SET_MAX=256
CONFIG_IP_NF_SET_NETHASH=m
CONFIG_IP_NF_SET_IPMAP=m
CONFIG_IP_NF_SET_IPPORTHASH=m
CONFIG_IP_NF_SET_IPTREE=m
+CONFIG_IP_NF_SET_IPTREEMAP=m
CONFIG_IP_NF_SET_MACIPMAP=m
CONFIG_IP_NF_SET_MAX=256
CONFIG_IP_NF_SET_NETHASH=m
+++ /dev/null
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,498 @@
-+#ifndef _IP_SET_H
-+#define _IP_SET_H
-+
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#if 0
-+#define IP_SET_DEBUG
-+#endif
-+
-+/*
-+ * A sockopt of such quality has hardly ever been seen before on the open
-+ * market! This little beauty, hardly ever used: above 64, so it's
-+ * traditionally used for firewalling, not touched (even once!) by the
-+ * 2.0, 2.2 and 2.4 kernels!
-+ *
-+ * Comes with its own certificate of authenticity, valid anywhere in the
-+ * Free world!
-+ *
-+ * Rusty, 19.4.2000
-+ */
-+#define SO_IP_SET 83
-+
-+/*
-+ * Heavily modify by Joakim Axelsson 08.03.2002
-+ * - Made it more modulebased
-+ *
-+ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
-+ * - bindings added
-+ * - in order to "deal with" backward compatibility, renamed to ipset
-+ */
-+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
-+ */
-+#define IP_SET_PROTOCOL_VERSION 2
-+
-+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
-+
-+/* Lets work with our own typedef for representing an IP address.
-+ * We hope to make the code more portable, possibly to IPv6...
-+ *
-+ * The representation works in HOST byte order, because most set types
-+ * will perform arithmetic operations and compare operations.
-+ *
-+ * For now the type is an uint32_t.
-+ *
-+ * Make sure to ONLY use the functions when translating and parsing
-+ * in order to keep the host byte order and make it more portable:
-+ * parse_ip()
-+ * parse_mask()
-+ * parse_ipandmask()
-+ * ip_tostring()
-+ * (Joakim: where are they???)
-+ */
-+
-+typedef uint32_t ip_set_ip_t;
-+
-+/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
-+ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
-+ */
-+typedef uint16_t ip_set_id_t;
-+
-+#define IP_SET_INVALID_ID 65535
-+
-+/* How deep we follow bindings */
-+#define IP_SET_MAX_BINDINGS 6
-+
-+/*
-+ * Option flags for kernel operations (ipt_set_info)
-+ */
-+#define IPSET_SRC 0x01 /* Source match/add */
-+#define IPSET_DST 0x02 /* Destination match/add */
-+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
-+
-+/*
-+ * Set features
-+ */
-+#define IPSET_TYPE_IP 0x01 /* IP address type of set */
-+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
-+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
-+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
-+
-+/* Reserved keywords */
-+#define IPSET_TOKEN_DEFAULT ":default:"
-+#define IPSET_TOKEN_ALL ":all:"
-+
-+/* SO_IP_SET operation constants, and their request struct types.
-+ *
-+ * Operation ids:
-+ * 0-99: commands with version checking
-+ * 100-199: add/del/test/bind/unbind
-+ * 200-299: list, save, restore
-+ */
-+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
-+ *
-+ * Sets are identified by name.
-+ */
-+
-+#define IP_SET_REQ_STD \
-+ unsigned op; \
-+ unsigned version; \
-+ char name[IP_SET_MAXNAMELEN]
-+
-+#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
-+struct ip_set_req_create {
-+ IP_SET_REQ_STD;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
-+struct ip_set_req_std {
-+ IP_SET_REQ_STD;
-+};
-+
-+#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
-+/* Uses ip_set_req_std */
-+
-+#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
-+/* Uses ip_set_req_create */
-+
-+#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
-+/* Uses ip_set_req_create */
-+
-+union ip_set_name_index {
-+ char name[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+};
-+
-+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
-+struct ip_set_req_get_set {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+};
-+
-+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
-+/* Uses ip_set_req_get_set */
-+
-+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
-+struct ip_set_req_version {
-+ unsigned op;
-+ unsigned version;
-+};
-+
-+/* Double shots operations:
-+ * add, del, test, bind and unbind.
-+ *
-+ * First we query the kernel to get the index and type of the target set,
-+ * then issue the command. Validity of IP is checked in kernel in order
-+ * to minimalize sockopt operations.
-+ */
-+
-+/* Get minimal set data for add/del/test/bind/unbind IP */
-+#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
-+struct ip_set_req_adt_get {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_REQ_BYINDEX \
-+ unsigned op; \
-+ ip_set_id_t index;
-+
-+struct ip_set_req_adt {
-+ IP_SET_REQ_BYINDEX;
-+};
-+
-+#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+struct ip_set_req_bind {
-+ IP_SET_REQ_BYINDEX;
-+ char binding[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
-+ * index = 0 means unbinding for all sets */
-+
-+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+
-+/* Multiple shots operations: list, save, restore.
-+ *
-+ * - check kernel version and query the max number of sets
-+ * - get the basic information on all sets
-+ * and size required for the next step
-+ * - get actual set data: header, data, bindings
-+ */
-+
-+/* Get max_sets and the index of a queried set
-+ */
-+#define IP_SET_OP_MAX_SETS 0x00000020
-+struct ip_set_req_max_sets {
-+ unsigned op;
-+ unsigned version;
-+ ip_set_id_t max_sets; /* max_sets */
-+ ip_set_id_t sets; /* real number of sets */
-+ union ip_set_name_index set; /* index of set if name used */
-+};
-+
-+/* Get the id and name of the sets plus size for next step */
-+#define IP_SET_OP_LIST_SIZE 0x00000201
-+#define IP_SET_OP_SAVE_SIZE 0x00000202
-+struct ip_set_req_setnames {
-+ unsigned op;
-+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
-+ /* followed by sets number of struct ip_set_name_list */
-+};
-+
-+struct ip_set_name_list {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ ip_set_id_t id;
-+};
-+
-+/* The actual list operation */
-+#define IP_SET_OP_LIST 0x00000203
-+struct ip_set_req_list {
-+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
-+};
-+
-+struct ip_set_list {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
-+};
-+
-+struct ip_set_hash_list {
-+ ip_set_ip_t ip;
-+ ip_set_id_t binding;
-+};
-+
-+/* The save operation */
-+#define IP_SET_OP_SAVE 0x00000204
-+/* Uses ip_set_req_list, in the reply replaced by
-+ * sets number of struct ip_set_save plus a marker
-+ * ip_set_save followed by ip_set_hash_save structures.
-+ */
-+struct ip_set_save {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+};
-+
-+/* At restoring, ip == 0 means default binding for the given set: */
-+struct ip_set_hash_save {
-+ ip_set_ip_t ip;
-+ ip_set_id_t id;
-+ ip_set_id_t binding;
-+};
-+
-+/* The restore operation */
-+#define IP_SET_OP_RESTORE 0x00000205
-+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
-+ * structures.
-+ */
-+struct ip_set_restore {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+};
-+
-+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
-+{
-+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
-+}
-+
-+#ifdef __KERNEL__
-+
-+#define ip_set_printk(format, args...) \
-+ do { \
-+ printk("%s: %s: ", __FILE__, __FUNCTION__); \
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+
-+#if defined(IP_SET_DEBUG)
-+#define DP(format, args...) \
-+ do { \
-+ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+#define IP_SET_ASSERT(x) \
-+ do { \
-+ if (!(x)) \
-+ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
-+ __FILE__, __LINE__, __FUNCTION__); \
-+ } while (0)
-+#else
-+#define DP(format, args...)
-+#define IP_SET_ASSERT(x)
-+#endif
-+
-+struct ip_set;
-+
-+/*
-+ * The ip_set_type definition - one per set type, e.g. "ipmap".
-+ *
-+ * Each individual set has a pointer, set->type, going to one
-+ * of these structures. Function pointers inside the structure implement
-+ * the real behaviour of the sets.
-+ *
-+ * If not mentioned differently, the implementation behind the function
-+ * pointers of a set_type, is expected to return 0 if ok, and a negative
-+ * errno (e.g. -EINVAL) on error.
-+ */
-+struct ip_set_type {
-+ struct list_head list; /* next in list of set types */
-+
-+ /* test for IP in set (kernel: iptables -m set src|dst)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* test for IP in set (userspace: ipset -T set IP)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /*
-+ * Size of the data structure passed by when
-+ * adding/deletin/testing an entry.
-+ */
-+ size_t reqsize;
-+
-+ /* Add IP into set (userspace: ipset -A set IP)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* remove IP from set (userspace: ipset -D set --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* new set creation - allocated type specific items
-+ */
-+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
-+
-+ /* retry the operation after successfully tweaking the set
-+ */
-+ int (*retry) (struct ip_set *set);
-+
-+ /* set destruction - free type specific items
-+ * There is no return value.
-+ * Can be called only when child sets are destroyed.
-+ */
-+ void (*destroy) (struct ip_set *set);
-+
-+ /* set flushing - reset all bits in the set, or something similar.
-+ * There is no return value.
-+ */
-+ void (*flush) (struct ip_set *set);
-+
-+ /* Listing: size needed for header
-+ */
-+ size_t header_size;
-+
-+ /* Listing: Get the header
-+ *
-+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
-+ */
-+ void (*list_header) (const struct ip_set *set,
-+ void *data);
-+
-+ /* Listing: Get the size for the set members
-+ */
-+ int (*list_members_size) (const struct ip_set *set);
-+
-+ /* Listing: Get the set members
-+ *
-+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
-+ */
-+ void (*list_members) (const struct ip_set *set,
-+ void *data);
-+
-+ char typename[IP_SET_MAXNAMELEN];
-+ unsigned char features;
-+ int protocol_version;
-+
-+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-+ struct module *me;
-+};
-+
-+extern int ip_set_register_set_type(struct ip_set_type *set_type);
-+extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
-+
-+/* A generic ipset */
-+struct ip_set {
-+ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
-+ rwlock_t lock; /* lock for concurrency control */
-+ ip_set_id_t id; /* set id for swapping */
-+ ip_set_id_t binding; /* default binding for the set */
-+ atomic_t ref; /* in kernel and in hash references */
-+ struct ip_set_type *type; /* the set types */
-+ void *data; /* pooltype specific data */
-+};
-+
-+/* Structure to bind set elements to sets */
-+struct ip_set_hash {
-+ struct list_head list; /* list of clashing entries in hash */
-+ ip_set_ip_t ip; /* ip from set */
-+ ip_set_id_t id; /* set id */
-+ ip_set_id_t binding; /* set we bind the element to */
-+};
-+
-+/* register and unregister set references */
-+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
-+
-+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern int ip_set_testip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_H*/
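For reference, a minimal userspace sketch (not part of the patch being removed above) of how the private SO_IP_SET sockopt declared in ip_set.h is typically driven: a request structure is filled with an op code and issued on a raw socket, with query-style operations assumed to go through getsockopt() as in the legacy ipset 2.x userspace tool. The raw-socket type and the use of SOL_IP are assumptions; the constants and struct ip_set_req_version come straight from the header above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>

int main(void)
{
	struct ip_set_req_version req;
	socklen_t size = sizeof(req);
	/* A raw socket is assumed here; opening it requires CAP_NET_ADMIN. */
	int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (sockfd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_VERSION;
	/* Query operations are assumed to go via getsockopt(); the kernel
	 * fills in req.version on success. */
	if (getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size) < 0) {
		perror("getsockopt(SO_IP_SET)");
		close(sockfd);
		return 1;
	}
	printf("kernel ipset protocol version %u, header version %u\n",
	       req.version, IP_SET_PROTOCOL_VERSION);
	close(sockfd);
	return 0;
}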
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_iphash.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,30 @@
-+#ifndef __IP_SET_IPHASH_H
-+#define __IP_SET_IPHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_iphash {
-+ ip_set_ip_t *members; /* the iphash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_iphash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_iphash {
-+ ip_set_ip_t ip;
-+};
-+
-+#endif /* __IP_SET_IPHASH_H */
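A hedged sketch of how an iphash set might be created over the same sockopt. It assumes the headers from this patch are on the include path, that "set"-style operations go through setsockopt(), and that the type-specific create parameters immediately follow struct ip_set_req_create in one buffer, mirroring how ip_set.c strips sizeof(struct ip_set_req_adt) off add/del/test requests. The set name and the hashsize/probes/resize/netmask values are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ip_set_iphash.h>

int main(void)
{
	/* One contiguous request: generic create header + iphash create data. */
	struct {
		struct ip_set_req_create hdr;
		struct ip_set_req_iphash_create data;
	} req;
	int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (sockfd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.hdr.op = IP_SET_OP_CREATE;
	req.hdr.version = IP_SET_PROTOCOL_VERSION;
	strncpy(req.hdr.name, "example", IP_SET_MAXNAMELEN - 1);
	strncpy(req.hdr.typename, SETTYPE_NAME, IP_SET_MAXNAMELEN - 1);
	req.data.hashsize = 1024;	/* illustrative values */
	req.data.probes = 8;
	req.data.resize = 50;
	req.data.netmask = 0xFFFFFFFF;	/* store full host addresses */
	if (setsockopt(sockfd, SOL_IP, SO_IP_SET, &req, sizeof(req)) < 0)
		perror("setsockopt(SO_IP_SET)");
	close(sockfd);
	return 0;
}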
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_ipmap.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,56 @@
-+#ifndef __IP_SET_IPMAP_H
-+#define __IP_SET_IPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_ipmap {
-+ void *members; /* the ipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ ip_set_ip_t netmask; /* subnet netmask */
-+ ip_set_ip_t sizeid; /* size of set in IPs */
-+ ip_set_ip_t hosts; /* number of hosts in a subnet */
-+};
-+
-+struct ip_set_req_ipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_ipmap {
-+ ip_set_ip_t ip;
-+};
-+
-+unsigned int
-+mask_to_bits(ip_set_ip_t mask)
-+{
-+ unsigned int bits = 32;
-+ ip_set_ip_t maskaddr;
-+
-+ if (mask == 0xFFFFFFFF)
-+ return bits;
-+
-+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
-+ maskaddr <<= 1;
-+
-+ return bits;
-+}
-+
-+ip_set_ip_t
-+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
-+{
-+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
-+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
-+ mask <<= 1;
-+
-+ return mask;
-+}
-+
-+#endif /* __IP_SET_IPMAP_H */
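The two helpers above convert between an address range and a netmask for the ipmap type. A small self-contained check (not part of the patch; it only assumes the header is on the include path) shows the round trip: a 192.168.0.0-192.168.0.255 range yields the /24 mask 0xffffff00, and mask_to_bits() maps that mask back to 24.

#include <stdio.h>
#include <stdint.h>
#include <linux/netfilter_ipv4/ip_set_ipmap.h>

int main(void)
{
	/* Addresses are handled in host byte order throughout ip_set.h. */
	ip_set_ip_t from = 0xC0A80000;	/* 192.168.0.0 */
	ip_set_ip_t to = 0xC0A800FF;	/* 192.168.0.255 */
	unsigned int bits;
	ip_set_ip_t mask = range_to_mask(from, to, &bits);

	/* Prints: mask 0xffffff00 -> /24, mask_to_bits() -> 24 */
	printf("mask 0x%08x -> /%u, mask_to_bits() -> %u\n",
	       (unsigned int)mask, bits, mask_to_bits(mask));
	return 0;
}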
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,34 @@
-+#ifndef __IP_SET_IPPORTHASH_H
-+#define __IP_SET_IPPORTHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+struct ip_set_ipporthash {
-+ ip_set_ip_t *members; /* the ipporthash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_ipporthash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_ipporthash {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_IPPORTHASH_H */
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_iptree.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,40 @@
-+#ifndef __IP_SET_IPTREE_H
-+#define __IP_SET_IPTREE_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_iptreed {
-+ unsigned long expires[256]; /* x.x.x.ADDR */
-+};
-+
-+struct ip_set_iptreec {
-+ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
-+};
-+
-+struct ip_set_iptreeb {
-+ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
-+};
-+
-+struct ip_set_iptree {
-+ unsigned int timeout;
-+ unsigned int gc_interval;
-+#ifdef __KERNEL__
-+ uint32_t elements; /* number of elements */
-+ struct timer_list gc;
-+ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
-+#endif
-+};
-+
-+struct ip_set_req_iptree_create {
-+ unsigned int timeout;
-+};
-+
-+struct ip_set_req_iptree {
-+ ip_set_ip_t ip;
-+ unsigned int timeout;
-+};
-+
-+#endif /* __IP_SET_IPTREE_H */
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_macipmap.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,38 @@
-+#ifndef __IP_SET_MACIPMAP_H
-+#define __IP_SET_MACIPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
-+
-+/* general flags */
-+#define IPSET_MACIP_MATCHUNSET 1
-+
-+/* per ip flags */
-+#define IPSET_MACIP_ISSET 1
-+
-+struct ip_set_macipmap {
-+ void *members; /* the macipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap {
-+ ip_set_ip_t ip;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+struct ip_set_macip {
-+ unsigned short flags;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+#endif /* __IP_SET_MACIPMAP_H */
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_malloc.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,116 @@
-+#ifndef _IP_SET_MALLOC_H
-+#define _IP_SET_MALLOC_H
-+
-+#ifdef __KERNEL__
-+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
-+
-+static inline void init_max_malloc_size(void)
-+{
-+#define CACHE(x) max_malloc_size = x;
-+#include <linux/kmalloc_sizes.h>
-+#undef CACHE
-+}
-+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
-+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
-+}
-+
-+struct harray {
-+ size_t max_elements;
-+ void *arrays[0];
-+};
-+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
-+{
-+ struct harray *harray;
-+ size_t max_elements, size, i, j;
-+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
-+
-+ if (typesize > max_malloc_size)
-+ return NULL;
-+
-+ max_elements = max_malloc_size/typesize;
-+ size = hashsize/max_elements;
-+ if (hashsize % max_elements)
-+ size++;
-+
-+ /* Last pointer signals end of arrays */
-+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
-+ flags);
-+
-+ if (!harray)
-+ return NULL;
-+
-+ for (i = 0; i < size - 1; i++) {
-+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, max_elements * typesize);
-+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
-+ flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
-+
-+ harray->max_elements = max_elements;
-+ harray->arrays[size] = NULL;
-+
-+ return (void *)harray;
-+
-+ undo:
-+ for (j = 0; j < i; j++) {
-+ kfree(harray->arrays[j]);
-+ }
-+ kfree(harray);
-+ return NULL;
-+}
-+
-+static inline void harray_free(void *h)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i] != NULL; i++)
-+ kfree(harray->arrays[i]);
-+ kfree(harray);
-+}
-+
-+static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i+1] != NULL; i++)
-+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
-+ (hashsize - i * harray->max_elements) * typesize);
-+}
-+
-+#define HARRAY_ELEM(h, type, which) \
-+({ \
-+ struct harray *__h = (struct harray *)(h); \
-+ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
-+ + (which)%(__h)->max_elements); \
-+})
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_MALLOC_H*/
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_nethash.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,55 @@
-+#ifndef __IP_SET_NETHASH_H
-+#define __IP_SET_NETHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_nethash {
-+ ip_set_ip_t *members; /* the nethash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_nethash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+};
-+
-+struct ip_set_req_nethash {
-+ ip_set_ip_t ip;
-+ unsigned char cidr;
-+};
-+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
-+#endif /* __IP_SET_NETHASH_H */
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_portmap.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,25 @@
-+#ifndef __IP_SET_PORTMAP_H
-+#define __IP_SET_PORTMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+struct ip_set_portmap {
-+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
-+};
-+
-+struct ip_set_req_portmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_PORTMAP_H */
-Index: linux-2.6.22-rc5/include/linux/netfilter_ipv4/ipt_set.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/include/linux/netfilter_ipv4/ipt_set.h 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,21 @@
-+#ifndef _IPT_SET_H
-+#define _IPT_SET_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+struct ipt_set_info {
-+ ip_set_id_t index;
-+ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
-+};
-+
-+/* match info */
-+struct ipt_set_info_match {
-+ struct ipt_set_info match_set;
-+};
-+
-+struct ipt_set_info_target {
-+ struct ipt_set_info add_set;
-+ struct ipt_set_info del_set;
-+};
-+
-+#endif /*_IPT_SET_H*/
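The iptables 'set' match and 'SET' target carry struct ipt_set_info into the kernel: index identifies the set, and flags[] selects source/destination per binding level. A hedged userspace sketch of how name-to-index resolution and flag setup might look; it loosely mirrors the legacy libipt_set helper, and the raw socket plus getsockopt() path are assumptions rather than part of this patch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ipt_set.h>

/* Resolve a set name to its kernel index via IP_SET_OP_GET_BYNAME. */
static int get_set_index(const char *name, ip_set_id_t *index)
{
	struct ip_set_req_get_set req;
	socklen_t size = sizeof(req);
	int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (sockfd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_GET_BYNAME;
	req.version = IP_SET_PROTOCOL_VERSION;
	strncpy(req.set.name, name, IP_SET_MAXNAMELEN - 1);
	if (getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size) < 0) {
		close(sockfd);
		return -1;
	}
	close(sockfd);
	*index = req.set.index;
	return req.set.index == IP_SET_INVALID_ID ? -1 : 0;
}

int main(void)
{
	struct ipt_set_info_match match;

	memset(&match, 0, sizeof(match));
	if (get_set_index("example", &match.match_set.index) < 0) {
		fprintf(stderr, "set 'example' not found\n");
		return 1;
	}
	/* Match the packet's source address at the first binding level. */
	match.match_set.flags[0] = IPSET_SRC;
	printf("set 'example' has index %u\n", match.match_set.index);
	return 0;
}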
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,2001 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module for IP set management */
-+
-+#include <linux/version.h>
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+#include <linux/config.h>
-+#endif
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/kmod.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <asm/semaphore.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+
-+#define ASSERT_READ_LOCK(x)
-+#define ASSERT_WRITE_LOCK(x)
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+static struct list_head set_type_list; /* all registered sets */
-+static struct ip_set **ip_set_list; /* all individual sets */
-+static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
-+static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
-+static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
-+static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
-+static struct list_head *ip_set_hash; /* hash of bindings */
-+static unsigned int ip_set_hash_random; /* random seed */
-+
-+/*
-+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and used at all other places
-+ * (set/SET netfilter modules, binding value, etc.)
-+ *
-+ * Userspace requests are serialized by ip_set_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
-+ * must obey the following rules:
-+ *
-+ * - kernel requests: read and write locking mandatory
-+ * - user requests: read locking optional, write locking mandatory
-+ */
-+
-+static inline void
-+__ip_set_get(ip_set_id_t index)
-+{
-+ atomic_inc(&ip_set_list[index]->ref);
-+}
-+
-+static inline void
-+__ip_set_put(ip_set_id_t index)
-+{
-+ atomic_dec(&ip_set_list[index]->ref);
-+}
-+
-+/*
-+ * Binding routines
-+ */
-+
-+static inline struct ip_set_hash *
-+__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ struct ip_set_hash *set_hash;
-+
-+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
-+ if (set_hash->id == id && set_hash->ip == ip)
-+ return set_hash;
-+
-+ return NULL;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ ASSERT_READ_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
-+ set_hash = __ip_set_find(key, id, ip);
-+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
-+}
-+
-+static inline void
-+__set_hash_del(struct ip_set_hash *set_hash)
-+{
-+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+
-+ __ip_set_put(set_hash->binding);
-+ list_del(&set_hash->list);
-+ kfree(set_hash);
-+}
-+
-+static int
-+ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ if (set_hash != NULL)
-+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+static int
-+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+ int ret = 0;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip), ip_set_list[binding]->name);
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ if (!set_hash) {
-+ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
-+ if (!set_hash) {
-+ ret = -ENOMEM;
-+ goto unlock;
-+ }
-+ INIT_LIST_HEAD(&set_hash->list);
-+ set_hash->id = id;
-+ set_hash->ip = ip;
-+ list_add(&set_hash->list, &ip_set_hash[key]);
-+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+ DP("overwrite binding: %s",
-+ ip_set_list[set_hash->binding]->name);
-+ __ip_set_put(set_hash->binding);
-+ }
-+ set_hash->binding = binding;
-+ __ip_set_get(set_hash->binding);
-+ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
-+ key, id, ip_set_list[id]->name,
-+ HIPQUAD(ip), binding, ip_set_list[binding]->name);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+#define FOREACH_HASH_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash; \
-+ \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+#define FOREACH_HASH_RW_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash, *__n; \
-+ \
-+ ASSERT_WRITE_LOCK(&ip_set_lock); \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+/* Add, del and test set entries from kernel */
-+
-+#define follow_bindings(index, set, ip) \
-+((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
-+ || (index = (set)->binding) != IP_SET_INVALID_ID)
-+
-+int
-+ip_set_testip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
-+ read_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return res;
-+}
-+
-+void
-+ip_set_addip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ retry:
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ if (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0)
-+ goto retry;
-+}
-+
-+void
-+ip_set_delip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+}
-+
-+/* Register and deregister settype */
-+
-+static inline struct ip_set_type *
-+find_set_type(const char *name)
-+{
-+ struct ip_set_type *set_type;
-+
-+ list_for_each_entry(set_type, &set_type_list, list)
-+ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
-+ return set_type;
-+ return NULL;
-+}
-+
-+int
-+ip_set_register_set_type(struct ip_set_type *set_type)
-+{
-+ int ret = 0;
-+
-+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
-+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
-+ set_type->typename,
-+ set_type->protocol_version,
-+ IP_SET_PROTOCOL_VERSION);
-+ return -EINVAL;
-+ }
-+
-+ write_lock_bh(&ip_set_lock);
-+ if (find_set_type(set_type->typename)) {
-+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
-+ set_type->typename);
-+ ret = -EINVAL;
-+ goto unlock;
-+ }
-+ if (!try_module_get(THIS_MODULE)) {
-+ ret = -EFAULT;
-+ goto unlock;
-+ }
-+ list_add(&set_type->list, &set_type_list);
-+ DP("'%s' registered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+void
-+ip_set_unregister_set_type(struct ip_set_type *set_type)
-+{
-+ write_lock_bh(&ip_set_lock);
-+ if (!find_set_type(set_type->typename)) {
-+ ip_set_printk("'%s' not registered?",
-+ set_type->typename);
-+ goto unlock;
-+ }
-+ list_del(&set_type->list);
-+ module_put(THIS_MODULE);
-+ DP("'%s' unregistered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+
-+}
-+
-+/*
-+ * Userspace routines
-+ */
-+
-+/*
-+ * Find set by name, reference it once. The reference makes sure the
-+ * thing pointed to, does not go away under our feet. Drop the reference
-+ * later, using ip_set_put().
-+ */
-+ip_set_id_t
-+ip_set_get_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ down(&ip_set_app_mutex);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
-+ __ip_set_get(i);
-+ index = i;
-+ break;
-+ }
-+ }
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * Find set by index, reference it once. The reference makes sure the
-+ * thing pointed to, does not go away under our feet. Drop the reference
-+ * later, using ip_set_put().
-+ */
-+ip_set_id_t
-+ip_set_get_byindex(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+
-+ if (index >= ip_set_max)
-+ return IP_SET_INVALID_ID;
-+
-+ if (ip_set_list[index])
-+ __ip_set_get(index);
-+ else
-+ index = IP_SET_INVALID_ID;
-+
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * If the given set pointer points to a valid set, decrement
-+ * reference count by 1. The caller shall not assume the index
-+ * to be valid, after calling this function.
-+ */
-+void ip_set_put(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+ if (ip_set_list[index])
-+ __ip_set_put(index);
-+ up(&ip_set_app_mutex);
-+}
-+
-+/* Find a set by name or index */
-+static ip_set_id_t
-+ip_set_find_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
-+ index = i;
-+ break;
-+ }
-+ }
-+ return index;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_byindex(ip_set_id_t index)
-+{
-+ if (index >= ip_set_max || ip_set_list[index] == NULL)
-+ index = IP_SET_INVALID_ID;
-+
-+ return index;
-+}
-+
-+/*
-+ * Add, del, test, bind and unbind
-+ */
-+
-+static inline int
-+__ip_set_testip(struct ip_set *set,
-+ const void *data,
-+ size_t size,
-+ ip_set_ip_t *ip)
-+{
-+ int res;
-+
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip(set, data, size, ip);
-+ read_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+__ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ do {
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip(set, data, size, &ip);
-+ write_unlock_bh(&set->lock);
-+ } while (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+
-+ return __ip_set_addip(index,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt));
-+}
-+
-+static int
-+ip_set_delip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+ write_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+
-+ return (res > 0 ? -EEXIST : res);
-+}
-+
-+static int
-+ip_set_bindip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of a set */
-+ char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets as binding values are referenced */
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ set->binding = binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = ip_set_hash_add(set->id, ip, binding);
-+
-+ return res;
-+}
-+
-+#define FOREACH_SET_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __i; \
-+ struct ip_set *__set; \
-+ \
-+ for (__i = 0; __i < ip_set_max; __i++) { \
-+ __set = ip_set_list[__i]; \
-+ if (__set != NULL) \
-+ fn(__set , ##args); \
-+ } \
-+})
-+
-+static inline void
-+__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
-+{
-+ if (set_hash->id == id)
-+ __set_hash_del(set_hash);
-+}
-+
-+static inline void
-+__unbind_default(struct ip_set *set)
-+{
-+ if (set->binding != IP_SET_INVALID_ID) {
-+ /* Sets as binding values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ }
-+}
-+
-+static int
-+ip_set_unbindip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ DP("");
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ DP("%u %s", index, req_bind->binding);
-+ if (index == IP_SET_INVALID_ID) {
-+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_SET_DO(__unbind_default);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all bindings of all sets*/
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+ DP("unreachable reached!");
-+ return -EINVAL;
-+ }
-+
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of set */
-+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
-+
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets in hash values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all bindings */
-+
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+
-+ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
-+ if (res >= 0)
-+ res = ip_set_hash_del(set->id, ip);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testbind(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of set */
-+ char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = (set->binding == binding) ? -EEXIST : 0;
-+
-+ return res;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = (ip_set_find_in_hash(set->id, ip) == binding)
-+ ? -EEXIST : 0;
-+
-+ return res;
-+}
-+
-+static struct ip_set_type *
-+find_set_type_rlock(const char *typename)
-+{
-+ struct ip_set_type *type;
-+
-+ read_lock_bh(&ip_set_lock);
-+ type = find_set_type(typename);
-+ if (type == NULL)
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return type;
-+}
-+
-+static int
-+find_free_id(const char *name,
-+ ip_set_id_t *index,
-+ ip_set_id_t *id)
-+{
-+ ip_set_id_t i;
-+
-+ *id = IP_SET_INVALID_ID;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL) {
-+ if (*id == IP_SET_INVALID_ID)
-+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
-+ /* Name clash */
-+ return -EEXIST;
-+ }
-+ if (*id == IP_SET_INVALID_ID)
-+ /* No free slot remained */
-+ return -ERANGE;
-+ /* Check that index is usable as id (swapping) */
-+ check:
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && ip_set_list[i]->id == *id) {
-+ *id = i;
-+ goto check;
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Create a set
-+ */
-+static int
-+ip_set_create(const char *name,
-+ const char *typename,
-+ ip_set_id_t restore,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set;
-+ ip_set_id_t index = 0, id;
-+ int res = 0;
-+
-+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
-+ /*
-+ * First, and without any locks, allocate and initialize
-+ * a normal base set structure.
-+ */
-+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
-+ if (!set)
-+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ set->binding = IP_SET_INVALID_ID;
-+ atomic_set(&set->ref, 0);
-+
-+ /*
-+ * Next, take the &ip_set_lock, check that we know the type,
-+ * and take a reference on the type, to make sure it
-+ * stays available while constructing our new set.
-+ *
-+ * After referencing the type, we drop the &ip_set_lock,
-+ * and let the new set construction run without locks.
-+ */
-+ set->type = find_set_type_rlock(typename);
-+ if (set->type == NULL) {
-+ /* Try loading the module */
-+ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
-+ strcpy(modulename, "ip_set_");
-+ strcat(modulename, typename);
-+ DP("try to load %s", modulename);
-+ request_module(modulename);
-+ set->type = find_set_type_rlock(typename);
-+ }
-+ if (set->type == NULL) {
-+ ip_set_printk("no set type '%s', set '%s' not created",
-+ typename, name);
-+ res = -ENOENT;
-+ goto out;
-+ }
-+ if (!try_module_get(set->type->me)) {
-+ read_unlock_bh(&ip_set_lock);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ read_unlock_bh(&ip_set_lock);
-+
-+ /*
-+ * Without holding any locks, create private part.
-+ */
-+ res = set->type->create(set, data, size);
-+ if (res != 0)
-+ goto put_out;
-+
-+ /* BTW, res==0 here. */
-+
-+ /*
-+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
-+ * ip_set_list.
-+ */
-+ write_lock_bh(&ip_set_lock);
-+ if ((res = find_free_id(set->name, &index, &id)) != 0) {
-+ DP("no free id!");
-+ goto cleanup;
-+ }
-+
-+ /* Make sure restore gets the same index */
-+ if (restore != IP_SET_INVALID_ID && index != restore) {
-+ DP("Can't restore, sets are screwed up");
-+ res = -ERANGE;
-+ goto cleanup;
-+ }
-+
-+ /*
-+ * Finally! Add our shiny new set to the list, and be done.
-+ */
-+ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
-+ set->id = id;
-+ ip_set_list[index] = set;
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+
-+ cleanup:
-+ write_unlock_bh(&ip_set_lock);
-+ set->type->destroy(set);
-+ put_out:
-+ module_put(set->type->me);
-+ out:
-+ kfree(set);
-+ return res;
-+}
-+
-+/*
-+ * Destroy a given existing set
-+ */
-+static void
-+ip_set_destroy_set(ip_set_id_t index)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+
-+ IP_SET_ASSERT(set);
-+ DP("set: %s", set->name);
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ ip_set_list[index] = NULL;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ /* Must call it without holding any lock */
-+ set->type->destroy(set);
-+ module_put(set->type->me);
-+ kfree(set);
-+}
-+
-+/*
-+ * Destroy a set - or all sets
-+ * Sets must not be referenced/used.
-+ */
-+static int
-+ip_set_destroy(ip_set_id_t index)
-+{
-+ ip_set_id_t i;
-+
-+ /* ref modification always protected by the mutex */
-+ if (index != IP_SET_INVALID_ID) {
-+ if (atomic_read(&ip_set_list[index]->ref))
-+ return -EBUSY;
-+ ip_set_destroy_set(index);
-+ } else {
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && (atomic_read(&ip_set_list[i]->ref)))
-+ return -EBUSY;
-+ }
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ ip_set_destroy_set(i);
-+ }
-+ }
-+ return 0;
-+}
-+
-+static void
-+ip_set_flush_set(struct ip_set *set)
-+{
-+ DP("set: %s %u", set->name, set->id);
-+
-+ write_lock_bh(&set->lock);
-+ set->type->flush(set);
-+ write_unlock_bh(&set->lock);
-+}
-+
-+/*
-+ * Flush data in a set - or in all sets
-+ */
-+static int
-+ip_set_flush(ip_set_id_t index)
-+{
-+ if (index != IP_SET_INVALID_ID) {
-+ IP_SET_ASSERT(ip_set_list[index]);
-+ ip_set_flush_set(ip_set_list[index]);
-+ } else
-+ FOREACH_SET_DO(ip_set_flush_set);
-+
-+ return 0;
-+}
-+
-+/* Rename a set */
-+static int
-+ip_set_rename(ip_set_id_t index, const char *name)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_id_t i;
-+ int res = 0;
-+
-+ DP("set: %s to %s", set->name, name);
-+ write_lock_bh(&ip_set_lock);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
-+ res = -EEXIST;
-+ goto unlock;
-+ }
-+ }
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+}
-+
-+/*
-+ * Swap two sets so that name/index points to the other.
-+ * References are also swapped.
-+ */
-+static int
-+ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
-+{
-+ struct ip_set *from = ip_set_list[from_index];
-+ struct ip_set *to = ip_set_list[to_index];
-+ char from_name[IP_SET_MAXNAMELEN];
-+ u_int32_t from_ref;
-+
-+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artifical restriction. */
-+ if (from->type->features != to->type->features)
-+ return -ENOEXEC;
-+
-+ /* No magic here: ref munging protected by the mutex */
-+ write_lock_bh(&ip_set_lock);
-+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
-+ from_ref = atomic_read(&from->ref);
-+
-+ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
-+ atomic_set(&from->ref, atomic_read(&to->ref));
-+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
-+ atomic_set(&to->ref, from_ref);
-+
-+ ip_set_list[from_index] = to;
-+ ip_set_list[to_index] = from;
-+
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+/*
-+ * List set data
-+ */
-+
-+static inline void
-+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_list);
-+}
-+
-+static inline void
-+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_save);
-+}
-+
-+static inline void
-+__set_hash_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, void *data, int *used)
-+{
-+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
-+
-+ hash_list->ip = set_hash->ip;
-+ hash_list->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_list);
-+ }
-+}
-+
-+static int ip_set_list_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_list *set_list;
-+
-+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
-+
-+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_list) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_list);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_list->header_size = set->type->header_size;
-+ if (*used + set_list->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_list->index = index;
-+ set_list->binding = set->binding;
-+ set_list->ref = atomic_read(&set->ref);
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_list->header_size;
-+
-+ /* Get and ensure set specific members size */
-+ set_list->members_size = set->type->list_members_size(set);
-+ if (*used + set_list->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_list->members_size;
-+ read_unlock_bh(&set->lock);
-+
-+ /* Bindings */
-+
-+ /* Get and ensure set specific bindings size */
-+ set_list->bindings_size = 0;
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &set_list->bindings_size);
-+ if (*used + set_list->bindings_size > len)
-+ goto not_enough_mem;
-+
-+ /* Fill in set specific bindings data */
-+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+/*
-+ * Save sets
-+ */
-+static int ip_set_save_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set;
-+ struct ip_set_save *set_save;
-+
-+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_save);
-+
-+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
-+ data, data + *used);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_save->header_size = set->type->header_size;
-+ if (*used + set_save->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_save->index = index;
-+ set_save->binding = set->binding;
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_save->header_size;
-+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
-+ /* Get and ensure set specific members size */
-+ set_save->members_size = set->type->list_members_size(set);
-+ if (*used + set_save->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_save->members_size;
-+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+static inline void
-+__set_hash_save_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id,
-+ void *data,
-+ int *used,
-+ int len,
-+ int *res)
-+{
-+ if (*res == 0
-+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
-+ /* Ensure bindings size */
-+ if (*used + sizeof(struct ip_set_hash_save) > len) {
-+ *res = -ENOMEM;
-+ return;
-+ }
-+ hash_save->id = set_hash->id;
-+ hash_save->ip = set_hash->ip;
-+ hash_save->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_save);
-+ }
-+}
-+
-+static int ip_set_save_bindings(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ int res = 0;
-+ struct ip_set_save *set_save;
-+
-+ DP("used %u, len %u", *used, len);
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ return -ENOMEM;
-+
-+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
-+ set_save->index = IP_SET_INVALID_ID;
-+ set_save->header_size = 0;
-+ set_save->members_size = 0;
-+ *used += sizeof(struct ip_set_save);
-+
-+ DP("marker added used %u, len %u", *used, len);
-+ /* Fill in bindings data */
-+ if (index != IP_SET_INVALID_ID)
-+ /* Sets are identified by id in hash */
-+ index = ip_set_list[index]->id;
-+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
-+
-+ return res;
-+}
-+
-+/*
-+ * Restore sets
-+ */
-+static int ip_set_restore(void *data,
-+ int len)
-+{
-+ int res = 0;
-+ int line = 0, used = 0, members_size;
-+ struct ip_set *set;
-+ struct ip_set_hash_save *hash_save;
-+ struct ip_set_restore *set_restore;
-+ ip_set_id_t index;
-+
-+ /* Loop to restore sets */
-+ while (1) {
-+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
-+ /* Get and ensure header size */
-+ if (used + sizeof(struct ip_set_restore) > len)
-+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
-+ used += sizeof(struct ip_set_restore);
-+
-+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
-+ + set_restore->members_size > len)
-+ return line;
-+
-+ /* Check marker */
-+ if (set_restore->index == IP_SET_INVALID_ID) {
-+ line--;
-+ goto bindings;
-+ }
-+
-+ /* Try to create the set */
-+ DP("restore %s %s", set_restore->name, set_restore->typename);
-+ res = ip_set_create(set_restore->name,
-+ set_restore->typename,
-+ set_restore->index,
-+ data + used,
-+ set_restore->header_size);
-+
-+ if (res != 0)
-+ return line;
-+ used += set_restore->header_size;
-+
-+ index = ip_set_find_byindex(set_restore->index);
-+ DP("index %u, restore_index %u", index, set_restore->index);
-+ if (index != set_restore->index)
-+ return line;
-+ /* Try to restore members data */
-+ set = ip_set_list[index];
-+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
-+ while (members_size + set->type->reqsize <=
-+ set_restore->members_size) {
-+ line++;
-+ DP("members: %u, line %u", members_size, line);
-+ res = __ip_set_addip(index,
-+ data + used + members_size,
-+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
-+ return line;
-+ members_size += set->type->reqsize;
-+ }
-+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
-+ if (members_size != set_restore->members_size)
-+ return line++;
-+ used += set_restore->members_size;
-+ }
-+
-+ bindings:
-+ /* Loop to restore bindings */
-+ while (used < len) {
-+ line++;
-+
-+ DP("restore binding, line %u", line);
-+ /* Get and ensure size */
-+ if (used + sizeof(struct ip_set_hash_save) > len)
-+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
-+ used += sizeof(struct ip_set_hash_save);
-+
-+ /* hash_save->id is used to store the index */
-+ index = ip_set_find_byindex(hash_save->id);
-+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
-+ if (index != hash_save->id)
-+ return line;
-+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
-+ DP("corrupt binding set index %u", hash_save->binding);
-+ return line;
-+ }
-+ set = ip_set_list[hash_save->id];
-+ /* Null valued IP means default binding */
-+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
-+ hash_save->ip,
-+ hash_save->binding);
-+ else {
-+ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
-+ write_lock_bh(&ip_set_lock);
-+ set->binding = hash_save->binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+ DP("default binding: %u", set->binding);
-+ }
-+ if (res != 0)
-+ return line;
-+ }
-+ if (used != len)
-+ return line;
-+
-+ return 0;
-+}
-+
-+static int
-+ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
-+{
-+ void *data;
-+ int res = 0; /* Assume OK */
-+ unsigned *op;
-+ struct ip_set_req_adt *req_adt;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
-+ struct fn_table {
-+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
-+ } adtfn_table[] =
-+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
-+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
-+ };
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (len <= sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >%zu, got %u)",
-+ sizeof(unsigned), len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(len);
-+ if (!data) {
-+ DP("out of mem for %u bytes", len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *)data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
-+ if (len < sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+ res = ip_set_create(req_create->name,
-+ req_create->typename,
-+ IP_SET_INVALID_ID,
-+ data + sizeof(struct ip_set_req_create),
-+ len - sizeof(struct ip_set_req_create));
-+ goto done;
-+ }
-+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
-+ /* Destroy all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_destroy->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+
-+ res = ip_set_destroy(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_flush->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = ip_set_flush(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_rename->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_rename(index, req_rename->typename);
-+ goto done;
-+ }
-+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
-+ ip_set_id_t to_index;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid SWAP data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_swap->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ to_index = ip_set_find_byname(req_swap->typename);
-+ if (to_index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_swap(index, to_index);
-+ goto done;
-+ }
-+ default:
-+ break; /* Set identified by id */
-+ }
-+
-+ /* Here we may have add/del/test/bind/unbind/test_bind operations */
-+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
-+ res = -EBADMSG;
-+ goto done;
-+ }
-+ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
-+
-+ if (len < sizeof(struct ip_set_req_adt)) {
-+ ip_set_printk("short data in adt request (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_adt), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_adt = (struct ip_set_req_adt *) data;
-+
-+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
-+ && req_adt->index == IP_SET_INVALID_ID)) {
-+ index = ip_set_find_byindex(req_adt->index);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = adtfn(index, data, len);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static int
-+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
-+{
-+ int res = 0;
-+ unsigned *op;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ void *data;
-+ int copylen = *len;
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, *len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (*len < sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >=%zu, got %d)",
-+ sizeof(unsigned), *len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(*len);
-+ if (!data) {
-+ DP("out of mem for %d bytes", *len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, *len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *) data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_version)) {
-+ ip_set_printk("invalid VERSION (want %zu, got %d)",
-+ sizeof(struct ip_set_req_version),
-+ *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_version->version = IP_SET_PROTOCOL_VERSION;
-+ res = copy_to_user(user, req_version,
-+ sizeof(struct ip_set_req_version));
-+ goto done;
-+ }
-+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ req_get->set.index = index;
-+ goto copy;
-+ }
-+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byindex(req_get->set.index);
-+ strncpy(req_get->set.name,
-+ index == IP_SET_INVALID_ID ? ""
-+ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
-+ goto copy;
-+ }
-+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_adt_get)) {
-+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
-+ sizeof(struct ip_set_req_adt_get), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ if (index != IP_SET_INVALID_ID) {
-+ req_get->set.index = index;
-+ strncpy(req_get->typename,
-+ ip_set_list[index]->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ } else {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
-+ ip_set_id_t i;
-+
-+ if (*len != sizeof(struct ip_set_req_max_sets)) {
-+ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
-+ sizeof(struct ip_set_req_max_sets), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
-+ req_max_sets->set.index = IP_SET_INVALID_ID;
-+ } else {
-+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
-+ ip_set_find_byname(req_max_sets->set.name);
-+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ req_max_sets->max_sets = ip_set_max;
-+ req_max_sets->sets = 0;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ req_max_sets->sets++;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST_SIZE:
-+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
-+ struct ip_set_name_list *name_list;
-+ struct ip_set *set;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)) {
-+ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_setnames), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_setnames->size = 0;
-+ used = sizeof(struct ip_set_req_setnames);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL)
-+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
-+ used += sizeof(struct ip_set_name_list);
-+ if (used > copylen) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ set = ip_set_list[i];
-+ /* Fill in index, name, etc. */
-+ name_list->index = i;
-+ name_list->id = set->id;
-+ strncpy(name_list->name,
-+ set->name,
-+ IP_SET_MAXNAMELEN - 1);
-+ strncpy(name_list->typename,
-+ set->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ DP("filled %s of type %s, index %u\n",
-+ name_list->name, name_list->typename,
-+ name_list->index);
-+ if (!(req_setnames->index == IP_SET_INVALID_ID
-+ || req_setnames->index == i))
-+ continue;
-+ /* Update size */
-+ switch (*op) {
-+ case IP_SET_OP_LIST_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_list)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ case IP_SET_OP_SAVE_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_save)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ FOREACH_HASH_DO(__set_hash_bindings_size_save,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ default:
-+ break;
-+ }
-+ }
-+ if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short LIST (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_list->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* List all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_list_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* List an individual set */
-+ res = ip_set_list_set(index, data, &used, *len);
-+ }
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short SAVE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_save->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_save_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* Save an individual set */
-+ res = ip_set_save_set(index, data, &used, *len);
-+ }
-+ if (res == 0)
-+ res = ip_set_save_bindings(index, data, &used, *len);
-+
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
-+ int line;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)
-+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
-+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
-+ if (line != 0) {
-+ res = -EAGAIN;
-+ req_restore->size = line;
-+ copylen = sizeof(struct ip_set_req_setnames);
-+ goto copy;
-+ }
-+ goto done;
-+ }
-+ default:
-+ res = -EBADMSG;
-+ goto done;
-+ } /* end of switch(op) */
-+
-+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
-+ && ip_set_list[index]
-+ ? ip_set_list[index]->name
-+ : ":all:", copylen);
-+ res = copy_to_user(user, data, copylen);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static struct nf_sockopt_ops so_set = {
-+ .pf = PF_INET,
-+ .set_optmin = SO_IP_SET,
-+ .set_optmax = SO_IP_SET + 1,
-+ .set = &ip_set_sockfn_set,
-+ .get_optmin = SO_IP_SET,
-+ .get_optmax = SO_IP_SET + 1,
-+ .get = &ip_set_sockfn_get,
-+ .use = 0
-+};
-+
-+static int max_sets, hash_size;
-+module_param(max_sets, int, 0600);
-+MODULE_PARM_DESC(max_sets, "maximal number of sets");
-+module_param(hash_size, int, 0600);
-+MODULE_PARM_DESC(hash_size, "hash size for bindings");
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("module implementing core IP set support");
-+
-+static int __init init(void)
-+{
-+ int res;
-+ ip_set_id_t i;
-+
-+ get_random_bytes(&ip_set_hash_random, 4);
-+ if (max_sets)
-+ ip_set_max = max_sets;
-+ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
-+ if (!ip_set_list) {
-+ printk(KERN_ERR "Unable to create ip_set_list\n");
-+ return -ENOMEM;
-+ }
-+ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
-+ if (hash_size)
-+ ip_set_bindings_hash_size = hash_size;
-+ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
-+ if (!ip_set_hash) {
-+ printk(KERN_ERR "Unable to create ip_set_hash\n");
-+ vfree(ip_set_list);
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < ip_set_bindings_hash_size; i++)
-+ INIT_LIST_HEAD(&ip_set_hash[i]);
-+
-+ INIT_LIST_HEAD(&set_type_list);
-+
-+ res = nf_register_sockopt(&so_set);
-+ if (res != 0) {
-+ ip_set_printk("SO_SET registry failed: %d", res);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ return res;
-+ }
-+ return 0;
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* There can't be any existing set or binding */
-+ nf_unregister_sockopt(&so_set);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ DP("these are the famous last words");
-+}
-+
-+EXPORT_SYMBOL(ip_set_register_set_type);
-+EXPORT_SYMBOL(ip_set_unregister_set_type);
-+
-+EXPORT_SYMBOL(ip_set_get_byname);
-+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
-+
-+EXPORT_SYMBOL(ip_set_addip_kernel);
-+EXPORT_SYMBOL(ip_set_delip_kernel);
-+EXPORT_SYMBOL(ip_set_testip_kernel);
-+
-+module_init(init);
-+module_exit(fini);
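
The sockopt handlers above are the whole userspace control path: every request begins with an unsigned op code, and IP_SET_OP_VERSION is answered with IP_SET_PROTOCOL_VERSION so the ipset binary can check that it matches the module. A minimal userspace sketch of that version handshake follows; it is not part of the patch, and it assumes the request layout (op word followed by version word), SO_IP_SET and IP_SET_OP_VERSION come from the ip_set.h header added above, with root/CAP_NET_ADMIN required exactly as the handlers check.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>   /* SO_IP_SET, IP_SET_OP_VERSION, ... */

int main(void)
{
    /* The request starts with the op code; the kernel fills in .version. */
    struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };
    socklen_t len = sizeof(req);
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

    if (fd < 0)
        return 1;           /* needs root (CAP_NET_RAW for the socket, CAP_NET_ADMIN for the sockopt) */
    if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &len) == 0)
        printf("kernel ipset protocol version %u\n", req.version);
    return 0;
}
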
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_iphash.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_iphash.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,413 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip hash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_iphash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements > limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id(set, ip, hash_ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->netmask = map->netmask;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iphash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
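
The iphash type above implements a multi-probe, open-addressing hash: each of map->probes random seeds selects one slot, a value of 0 marks an empty slot (which is why the type refuses to store 0.0.0.0), and when all probed slots are occupied __addip() returns -EAGAIN so that retry() can grow the array by map->resize percent and re-insert every element. A standalone sketch of just that probing logic follows; the mixing function, seeds and sizes are stand-ins, not the kernel's jhash.

#include <stdint.h>
#include <stdio.h>

#define PROBES   4
#define HASHSIZE 8

/* Stand-in for jhash_1word(); any decent mixer illustrates the idea. */
static uint32_t mix(uint32_t ip, uint32_t seed)
{
    uint32_t h = ip ^ seed;
    h ^= h >> 16;
    h *= 0x45d9f3b;
    h ^= h >> 16;
    return h;
}

/* Mirrors __addip(): 0 = added, -1 = already present, 1 = rehash needed */
static int add(uint32_t *slots, uint32_t ip, const uint32_t *seed)
{
    for (int i = 0; i < PROBES; i++) {
        uint32_t id = mix(ip, seed[i]) % HASHSIZE;
        if (slots[id] == ip)
            return -1;              /* -EEXIST in the kernel code */
        if (slots[id] == 0) {
            slots[id] = ip;         /* 0 marks an empty slot */
            return 0;
        }
    }
    return 1;                       /* -EAGAIN: grow the array and re-insert */
}

int main(void)
{
    uint32_t slots[HASHSIZE] = { 0 };
    uint32_t seed[PROBES] = { 0x9e3779b9, 0x85ebca6b, 0xc2b2ae35, 0x27d4eb2f };

    printf("%d\n", add(slots, 0x0a000001, seed));   /* 10.0.0.1 -> 0 */
    printf("%d\n", add(slots, 0x0a000001, seed));   /* duplicate -> -1 */
    return 0;
}
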
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_ipmap.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_ipmap.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,327 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the single bitmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_ipmap.h>
-+
-+static inline ip_set_ip_t
-+ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
-+{
-+ return (ip - map->first_ip)/map->hosts;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
-+
-+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->netmask = req->netmask;
-+
-+ if (req->netmask == 0xFFFFFFFF) {
-+ map->hosts = 1;
-+ map->sizeid = map->last_ip - map->first_ip + 1;
-+ } else {
-+ unsigned int mask_bits, netmask_bits;
-+ ip_set_ip_t mask;
-+
-+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
-+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
-+ netmask_bits = mask_to_bits(map->netmask);
-+
-+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
-+ || netmask_bits <= mask_bits)
-+ return -ENOEXEC;
-+
-+ DP("mask_bits %u, netmask_bits %u",
-+ mask_bits, netmask_bits);
-+ map->hosts = 2 << (32 - netmask_bits - 1);
-+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
-+ }
-+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
-+ return -ENOEXEC;
-+ }
-+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+ header->netmask = map->netmask;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
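
The ipmap type above is a plain bitmap over a fixed range: ip_to_id() takes the netmask-filtered address and divides its offset from first_ip by the number of hosts per netmask block to get a bit position in map->members. A small worked sketch of that mapping follows, using hypothetical example values.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as ip_to_id() above, with the netmask applied by the caller. */
static uint32_t ip_to_id(uint32_t first_ip, uint32_t hosts, uint32_t ip)
{
    return (ip - first_ip) / hosts;
}

int main(void)
{
    /* Range 10.0.0.0-10.0.255.255 with a /24 netmask: 256 hosts per block */
    uint32_t first_ip = 0x0a000000, hosts = 256;
    uint32_t ip = 0x0a000305 & 0xffffff00;              /* 10.0.3.5 -> 10.0.3.0 */

    printf("bit %u\n", ip_to_id(first_ip, hosts, ip));  /* prints "bit 3" */
    return 0;
}
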
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_ipporthash.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_ipporthash.c 2007-06-20 20:00:06.000000000 +0100
-@@ -0,0 +1,535 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip+port hash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = ip_hdr(skb);
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdrlen(skb), &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdrlen(skb), &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = HASH_IP(map, ip, port);
-+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (map->elements > limit)
-+ return -ERANGE;
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = HASH_IP(map, ip, port);
-+
-+ return __add_haship(map, *hash_ip);
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ id = hash_id(set, ip, port, hash_ip);
-+
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_iptree.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_iptree.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,571 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the iptree type */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
-+#include <linux/netfilter_ipv4/ip_set_iptree.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* Garbage collection interval in seconds: */
-+#define IPTREE_GC_TIME 5*60
-+/* Sleep this many milliseconds before trying again
-+ * to delete the gc timer when destroying or flushing a set */
-+#define IPTREE_DESTROY_SLEEP 100
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
-+
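-+/* Split the stored address into four bytes: a, b and c index the three
-+ * 256-way tree levels, d selects the slot in the leaf node. */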
-+#define ABCD(a,b,c,d,addrp) do { \
-+ a = ((unsigned char *)addrp)[3]; \
-+ b = ((unsigned char *)addrp)[2]; \
-+ c = ((unsigned char *)addrp)[1]; \
-+ d = ((unsigned char *)addrp)[0]; \
-+} while (0)
-+
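-+/* Follow one tree level; a missing branch means the address is not in the set. */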
-+#define TESTIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return 0; \
-+} while (0)
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
-+ TESTIP_WALK(map, a, btree);
-+ TESTIP_WALK(btree, b, ctree);
-+ TESTIP_WALK(ctree, c, dtree);
-+ DP("%lu %lu", dtree->expires[d], jiffies);
-+ return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
-+ : dtree->expires[d]);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
-+
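-+/* Follow one tree level on insertion, allocating a zeroed node from the
-+ * slab cache when the branch does not exist yet. */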
-+#define ADDIP_WALK(map, elem, branch, type, cachep, flags) do { \
-+ if ((map)->tree[elem]) { \
-+ DP("found %u", elem); \
-+ branch = (map)->tree[elem]; \
-+ } else { \
-+ branch = (type *) \
-+ kmem_cache_alloc(cachep, flags); \
-+ if (branch == NULL) \
-+ return -ENOMEM; \
-+ memset(branch, 0, sizeof(*branch)); \
-+ (map)->tree[elem] = branch; \
-+ DP("alloc %u", elem); \
-+ } \
-+} while (0)
-+
-+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip,
-+ unsigned int __nocast flags)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+ int ret = 0;
-+
-+ if (!ip || map->elements > limit)
-+ /* We could call the garbage collector
-+ * but it's probably overkill */
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
-+ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep, flags);
-+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep, flags);
-+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep, flags);
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ ret = -EEXIST;
-+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
-+ /* Lottery: a zero value would mean "no entry", so avoid the unlikely wrap */
-+ if (dtree->expires[d] == 0)
-+ dtree->expires[d] = 1;
-+ DP("%u %lu", d, dtree->expires[d]);
-+ if (ret == 0)
-+ map->elements++;
-+ return ret;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ map->timeout,
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
-+
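-+/* Follow one tree level on deletion; a missing branch means the entry
-+ * was never added and is reported as -EEXIST. */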
-+#define DELIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return -EEXIST; \
-+} while (0)
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DELIP_WALK(map, a, btree);
-+ DELIP_WALK(btree, b, ctree);
-+ DELIP_WALK(ctree, c, dtree);
-+
-+ if (dtree->expires[d]) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ return 0;
-+ }
-+ return -EEXIST;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+#define LOOP_WALK_BEGIN(map, i, branch) \
-+ for (i = 0; i < 256; i++) { \
-+ if (!(map)->tree[i]) \
-+ continue; \
-+ branch = (map)->tree[i]
-+
-+#define LOOP_WALK_END }
-+
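-+/* Timer-driven garbage collector: walk the whole tree, clear expired
-+ * entries, free leaves and branches that became empty, then re-arm. */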
-+static void ip_tree_gc(unsigned long ul_set)
-+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned char i,j,k;
-+
-+ i = j = k = 0;
-+ DP("gc: %s", set->name);
-+ write_lock_bh(&set->lock);
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]) {
-+ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
-+ a, b, c, d,
-+ dtree->expires[d], jiffies);
-+ if (map->timeout
-+ && time_before(dtree->expires[d], jiffies)) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ } else
-+ k = 1;
-+ }
-+ }
-+ if (k == 0) {
-+ DP("gc: %s: leaf %u %u %u empty",
-+ set->name, a, b, c);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ ctree->tree[c] = NULL;
-+ } else {
-+ DP("gc: %s: leaf %u %u %u not empty",
-+ set->name, a, b, c);
-+ j = 1;
-+ k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (j == 0) {
-+ DP("gc: %s: branch %u %u empty",
-+ set->name, a, b);
-+ kmem_cache_free(branch_cachep, ctree);
-+ btree->tree[b] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u %u not empty",
-+ set->name, a, b);
-+ i = 1;
-+ j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (i == 0) {
-+ DP("gc: %s: branch %u empty",
-+ set->name, a);
-+ kmem_cache_free(branch_cachep, btree);
-+ map->tree[a] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u not empty",
-+ set->name, a);
-+ i = j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ write_unlock_bh(&set->lock);
-+
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static inline void init_gc_timer(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ /* Even if there is no timeout for the entries,
-+ * we still have to call gc because delete
-+ * does not clean up empty branches */
-+ map->gc_interval = IPTREE_GC_TIME;
-+ init_timer(&map->gc);
-+ map->gc.data = (unsigned long) set;
-+ map->gc.function = ip_tree_gc;
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
-+ struct ip_set_iptree *map;
-+
-+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iptree));
-+ return -ENOMEM;
-+ }
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = req->timeout;
-+ map->elements = 0;
-+ set->data = map;
-+
-+ init_gc_timer(set);
-+
-+ return 0;
-+}
-+
-+static void __flush(struct ip_set_iptree *map)
-+{
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, ctree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, btree);
-+ LOOP_WALK_END;
-+ map->elements = 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ kfree(map);
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ unsigned int timeout = map->timeout;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = timeout;
-+
-+ init_gc_timer(set);
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
-+
-+ header->timeout = map->timeout;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned int count = 0;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ count++;
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+
-+ DP("members %u", count);
-+ return (count * sizeof(struct ip_set_req_iptree));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ size_t offset = 0;
-+ struct ip_set_req_iptree *entry;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
-+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
-+ : (dtree->expires[d] - jiffies)/HZ;
-+ offset += sizeof(struct ip_set_req_iptree);
-+ }
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+}
-+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptree type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ int ret;
-+
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+ if (!branch_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+ if (!leaf_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
-+ ret = -ENOMEM;
-+ goto free_branch;
-+ }
-+ ret = ip_set_register_set_type(&ip_set_iptree);
-+ if (ret == 0)
-+ goto out;
-+
-+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
-+ kmem_cache_destroy(branch_cachep);
-+ out:
-+ return ret;
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iptree);
-+ kmem_cache_destroy(leaf_cachep);
-+ kmem_cache_destroy(branch_cachep);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_macipmap.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_macipmap.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,353 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the macipmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->ip < map->first_ip || req->ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = req->ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
-+ return (memcmp(req->ethernet,
-+ &table[req->ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0);
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return 0;
-+
-+ *hash_ip = ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
-+ /* Is mac pointer valid?
-+ * If so, compare... */
-+ return (skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+ && (memcmp(eth_hdr(skb)->h_source,
-+ &table[ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0));
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+/* returns 0 on success */
-+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
-+ return 0;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+ return -EINVAL;
-+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
-+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+ header->flags = map->flags;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("macipmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_nethash.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_nethash.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,481 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a cidr nethash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_nethash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
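-+/* Look up the slot of the ip/cidr pair, trying each probe's hash
-+ * function in turn; UINT_MAX means the entry is absent. */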
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = pack(ip, cidr);
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ __u32 id = UINT_MAX;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
-+ if (id != UINT_MAX)
-+ break;
-+ }
-+ return id;
-+}
-+
-+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
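-+/* Insert into the hash: probe each hash function in turn, take the first
-+ * empty slot, report -EEXIST for a duplicate and -EAGAIN (rehash needed)
-+ * when every probe collides. */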
-+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements > limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
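-+/* Keep map->cidr[] sorted by decreasing prefix length so that lookups
-+ * try the most specific netmasks first. */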
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
-+
-+ return ret;
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
-+
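-+/* Rehash: grow the hash by 'resize' percent, re-insert every stored
-+ * element and repeat with a bigger table until everything fits. */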
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("nethash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_portmap.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ip_set_portmap.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,334 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a port set type as a bitmap */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_portmap.h>
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = ip_hdr(skb);
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+
-+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
-+}
-+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testport(set, req->port, hash_port);
-+}
-+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ res = __testport(set, port, hash_port);
-+
-+ return (res < 0 ? 0 : res);
-+}
-+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addport(set, req->port, hash_port);
-+}
-+
-+static int
-+addport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
-+}
-+
-+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delport(set, req->port, hash_port);
-+}
-+
-+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ipt_set.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ipt_set.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,150 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module to match an IP set. */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static inline int
-+match_set(const struct ipt_set_info *info,
-+ const struct sk_buff *skb,
-+ int inv)
-+{
-+ if (ip_set_testip_kernel(info->index, skb, info->flags))
-+ inv = !inv;
-+ return inv;
-+}
-+
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
-+#endif
-+{
-+ const struct ipt_set_info_match *info = matchinfo;
-+
-+ return match_set(&info->match_set,
-+ skb,
-+ info->match_set.flags[0] & IPSET_MATCH_INV);
-+}
-+
-+static int
-+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
-+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int matchsize,
-+#endif
-+ unsigned int hook_mask)
-+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return 0;
-+ }
-+#endif
-+
-+ index = ip_set_get_byindex(info->match_set.index);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("Cannot find set indentified by id %u to match",
-+ info->match_set.index);
-+ return 0; /* error */
-+ }
-+ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
-+ void *matchinfo)
-+#endif
-+{
-+ struct ipt_set_info_match *info = matchinfo;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return;
-+ }
-+#endif
-+ ip_set_put(info->match_set.index);
-+}
-+
-+static struct ipt_match set_match = {
-+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+ .family = AF_INET,
-+#endif
-+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
-+ .checkentry = &checkentry,
-+ .destroy = &destroy,
-+ .me = THIS_MODULE
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set match module");
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
-+static int __init ipt_ipset_init(void)
-+{
-+ return ipt_register_match(&set_match);
-+}
-+
-+static void __exit ipt_ipset_fini(void)
-+{
-+ ipt_unregister_match(&set_match);
-+}
-+
-+module_init(ipt_ipset_init);
-+module_exit(ipt_ipset_fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/ipt_SET.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/ipt_SET.c 2007-06-19 23:19:01.000000000 +0100
-@@ -0,0 +1,169 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* ipt_SET.c - netfilter target to manipulate IP sets */
-+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
-+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
-+#include <linux/version.h>
-+#include <linux/skbuff.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
-+#include <linux/netfilter_ipv4.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static unsigned int
-+target(struct sk_buff **pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ const void *targinfo,
-+ void *userinfo)
-+#else
-+ const void *targinfo)
-+#endif
-+{
-+ const struct ipt_set_info_target *info = targinfo;
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_addip_kernel(info->add_set.index,
-+ *pskb,
-+ info->add_set.flags);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_delip_kernel(info->del_set.index,
-+ *pskb,
-+ info->del_set.flags);
-+
-+ return IPT_CONTINUE;
-+}
-+
-+static int
-+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
-+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int targinfosize,
-+#endif
-+ unsigned int hook_mask)
-+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
-+ DP("bad target info size %u", targinfosize);
-+ return 0;
-+ }
-+#endif
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->add_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find add_set index %u as target",
-+ info->add_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+
-+ if (info->del_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->del_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find del_set index %u as target",
-+ info->del_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
-+ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
-+ void *targetinfo)
-+#endif
-+{
-+ struct ipt_set_info_target *info = targetinfo;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
-+ ip_set_printk("invalid targetsize %d", targetsize);
-+ return;
-+ }
-+#endif
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
-+}
-+
-+static struct ipt_target SET_target = {
-+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+ .family = AF_INET,
-+#endif
-+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
-+ .checkentry = checkentry,
-+ .destroy = destroy,
-+ .me = THIS_MODULE
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set target module");
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
-+static int __init ipt_SET_init(void)
-+{
-+ return ipt_register_target(&SET_target);
-+}
-+
-+static void __exit ipt_SET_fini(void)
-+{
-+ ipt_unregister_target(&SET_target);
-+}
-+
-+module_init(ipt_SET_init);
-+module_exit(ipt_SET_fini);
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/Kconfig
-===================================================================
---- linux-2.6.22-rc5.orig/net/ipv4/netfilter/Kconfig 2007-06-19 23:19:01.000000000 +0100
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/Kconfig 2007-06-20 19:20:37.000000000 +0100
-@@ -426,5 +426,114 @@
- Allows altering the ARP packet payload: source and destination
- hardware and network addresses.
-
-+config IP_NF_SET
-+ tristate "IP set support"
-+ depends on INET && NETFILTER
-+ help
-+ This option adds IP set support to the kernel.
-+ In order to define and use sets, you need the userspace utility
-+ ipset(8).
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MAX
-+ int "Maximum number of IP sets"
-+ default 256
-+ range 2 65534
-+ depends on IP_NF_SET
-+ help
-+ You can define here default value of the maximum number
-+ of IP sets for the kernel.
-+
-+ The value can be overriden by the 'max_sets' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_HASHSIZE
-+ int "Hash size for bindings of IP sets"
-+ default 1024
-+ depends on IP_NF_SET
-+ help
-+ You can define here default value of the hash size for
-+ bindings of IP sets.
-+
-+ The value can be overriden by the 'hash_size' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_IPMAP
-+ tristate "ipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MACIPMAP
-+ tristate "macipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the macipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_PORTMAP
-+ tristate "portmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the portmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPHASH
-+ tristate "iphash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iphash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_NETHASH
-+ tristate "nethash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the nethash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPPORTHASH
-+ tristate "ipporthash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipporthash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPTREE
-+ tristate "iptree set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iptree set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_MATCH_SET
-+ tristate "set match support"
-+ depends on IP_NF_SET
-+ help
-+ Set matching matches against given IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_TARGET_SET
-+ tristate "SET target support"
-+ depends on IP_NF_SET
-+ help
-+ The SET target makes possible to add/delete entries
-+ in IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+
- endmenu
-
-Index: linux-2.6.22-rc5/net/ipv4/netfilter/Makefile
-===================================================================
---- linux-2.6.22-rc5.orig/net/ipv4/netfilter/Makefile 2007-06-19 23:19:01.000000000 +0100
-+++ linux-2.6.22-rc5/net/ipv4/netfilter/Makefile 2007-06-20 19:20:37.000000000 +0100
-@@ -48,6 +48,7 @@
- obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
- obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
- obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-+obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
- obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
- obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
- obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o
-@@ -64,6 +65,17 @@
- obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
- obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
- obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
-+obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
-+
-+# sets
-+obj-$(CONFIG_IP_NF_SET) += ip_set.o
-+obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
-+obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
-+obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
-+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
-+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
-+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
-+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
-
- # generic ARP tables
- obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
--- /dev/null
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,498 @@
++#ifndef _IP_SET_H
++#define _IP_SET_H
++
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#if 0
++#define IP_SET_DEBUG
++#endif
++
++/*
++ * A sockopt of such quality has hardly ever been seen before on the open
++ * market! This little beauty, hardly ever used: above 64, so it's
++ * traditionally used for firewalling, not touched (even once!) by the
++ * 2.0, 2.2 and 2.4 kernels!
++ *
++ * Comes with its own certificate of authenticity, valid anywhere in the
++ * Free world!
++ *
++ * Rusty, 19.4.2000
++ */
++#define SO_IP_SET 83
++
++/*
++ * Heavily modified by Joakim Axelsson 08.03.2002
++ * - Made it more module-based
++ *
++ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
++ * - bindings added
++ * - in order to "deal with" backward compatibility, renamed to ipset
++ */
++
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
++ */
++#define IP_SET_PROTOCOL_VERSION 2
++
++#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
++
++/* Let's work with our own typedef for representing an IP address.
++ * We hope to make the code more portable, possibly to IPv6...
++ *
++ * The representation works in HOST byte order, because most set types
++ * will perform arithmetic operations and compare operations.
++ *
++ * For now the type is an uint32_t.
++ *
++ * Make sure to ONLY use the functions when translating and parsing
++ * in order to keep the host byte order and make it more portable:
++ * parse_ip()
++ * parse_mask()
++ * parse_ipandmask()
++ * ip_tostring()
++ * (Joakim: where are they???)
++ */
++
++typedef uint32_t ip_set_ip_t;
++
++/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
++ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
++ */
++typedef uint16_t ip_set_id_t;
++
++#define IP_SET_INVALID_ID 65535
++
++/* How deep we follow bindings */
++#define IP_SET_MAX_BINDINGS 6
++
++/*
++ * Option flags for kernel operations (ipt_set_info)
++ */
++#define IPSET_SRC 0x01 /* Source match/add */
++#define IPSET_DST 0x02 /* Destination match/add */
++#define IPSET_MATCH_INV 0x04 /* Inverse matching */
++
++/*
++ * Set features
++ */
++#define IPSET_TYPE_IP 0x01 /* IP address type of set */
++#define IPSET_TYPE_PORT 0x02 /* Port type of set */
++#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
++#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++
++/* Reserved keywords */
++#define IPSET_TOKEN_DEFAULT ":default:"
++#define IPSET_TOKEN_ALL ":all:"
++
++/* SO_IP_SET operation constants, and their request struct types.
++ *
++ * Operation ids:
++ * 0-99: commands with version checking
++ * 100-199: add/del/test/bind/unbind
++ * 200-299: list, save, restore
++ */
++
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
++ *
++ * Sets are identified by name.
++ */
++
++#define IP_SET_REQ_STD \
++ unsigned op; \
++ unsigned version; \
++ char name[IP_SET_MAXNAMELEN]
++
++#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
++struct ip_set_req_create {
++ IP_SET_REQ_STD;
++ char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
++struct ip_set_req_std {
++ IP_SET_REQ_STD;
++};
++
++#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
++/* Uses ip_set_req_std */
++
++#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
++/* Uses ip_set_req_create */
++
++#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
++/* Uses ip_set_req_create */
++
++union ip_set_name_index {
++ char name[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++};
++
++#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
++struct ip_set_req_get_set {
++ unsigned op;
++ unsigned version;
++ union ip_set_name_index set;
++};
++
++#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
++/* Uses ip_set_req_get_set */
++
++#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
++struct ip_set_req_version {
++ unsigned op;
++ unsigned version;
++};
++
++/* Double-shot operations:
++ * add, del, test, bind and unbind.
++ *
++ * First we query the kernel to get the index and type of the target set,
++ * then issue the command. Validity of the IP is checked in the kernel in
++ * order to minimize sockopt operations.
++ */
++
++/* Get minimal set data for add/del/test/bind/unbind IP */
++#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
++struct ip_set_req_adt_get {
++ unsigned op;
++ unsigned version;
++ union ip_set_name_index set;
++ char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_REQ_BYINDEX \
++ unsigned op; \
++ ip_set_id_t index;
++
++struct ip_set_req_adt {
++ IP_SET_REQ_BYINDEX;
++};
++
++#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++struct ip_set_req_bind {
++ IP_SET_REQ_BYINDEX;
++ char binding[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
++/* Uses ip_set_req_bind, with type specific addage
++ * index = 0 means unbinding for all sets */
++
++#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++
++/* Multiple-shot operations: list, save, restore.
++ *
++ * - check kernel version and query the max number of sets
++ * - get the basic information on all sets
++ * and size required for the next step
++ * - get actual set data: header, data, bindings
++ */
++
++/* Get max_sets and the index of a queried set
++ */
++#define IP_SET_OP_MAX_SETS 0x00000020
++struct ip_set_req_max_sets {
++ unsigned op;
++ unsigned version;
++ ip_set_id_t max_sets; /* max_sets */
++ ip_set_id_t sets; /* real number of sets */
++ union ip_set_name_index set; /* index of set if name used */
++};
++
++/* Get the id and name of the sets plus size for next step */
++#define IP_SET_OP_LIST_SIZE 0x00000201
++#define IP_SET_OP_SAVE_SIZE 0x00000202
++struct ip_set_req_setnames {
++ unsigned op;
++ ip_set_id_t index; /* set to list/save */
++ size_t size; /* size to get setdata/bindings */
++ /* followed by sets number of struct ip_set_name_list */
++};
++
++struct ip_set_name_list {
++ char name[IP_SET_MAXNAMELEN];
++ char typename[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++ ip_set_id_t id;
++};
++
++/* The actual list operation */
++#define IP_SET_OP_LIST 0x00000203
++struct ip_set_req_list {
++ IP_SET_REQ_BYINDEX;
++ /* sets number of struct ip_set_list in reply */
++};
++
++struct ip_set_list {
++ ip_set_id_t index;
++ ip_set_id_t binding;
++ u_int32_t ref;
++ size_t header_size; /* Set header data of header_size */
++ size_t members_size; /* Set members data of members_size */
++ size_t bindings_size; /* Set bindings data of bindings_size */
++};
++
++struct ip_set_hash_list {
++ ip_set_ip_t ip;
++ ip_set_id_t binding;
++};
++
++/* The save operation */
++#define IP_SET_OP_SAVE 0x00000204
++/* Uses ip_set_req_list, in the reply replaced by
++ * sets number of struct ip_set_save plus a marker
++ * ip_set_save followed by ip_set_hash_save structures.
++ */
++struct ip_set_save {
++ ip_set_id_t index;
++ ip_set_id_t binding;
++ size_t header_size; /* Set header data of header_size */
++ size_t members_size; /* Set members data of members_size */
++};
++
++/* At restoring, ip == 0 means default binding for the given set: */
++struct ip_set_hash_save {
++ ip_set_ip_t ip;
++ ip_set_id_t id;
++ ip_set_id_t binding;
++};
++
++/* The restore operation */
++#define IP_SET_OP_RESTORE 0x00000205
++/* Uses ip_set_req_setnames followed by ip_set_restore structures
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * structures.
++ */
++struct ip_set_restore {
++ char name[IP_SET_MAXNAMELEN];
++ char typename[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++ size_t header_size; /* Create data of header_size */
++ size_t members_size; /* Set members data of members_size */
++};
++
++static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
++{
++ return 4 * ((((b - a + 8) / 8) + 3) / 4);
++}
++
++#ifdef __KERNEL__
++
++#define ip_set_printk(format, args...) \
++ do { \
++ printk("%s: %s: ", __FILE__, __FUNCTION__); \
++ printk(format "\n" , ## args); \
++ } while (0)
++
++#if defined(IP_SET_DEBUG)
++#define DP(format, args...) \
++ do { \
++ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
++ printk(format "\n" , ## args); \
++ } while (0)
++#define IP_SET_ASSERT(x) \
++ do { \
++ if (!(x)) \
++ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ } while (0)
++#else
++#define DP(format, args...)
++#define IP_SET_ASSERT(x)
++#endif
++
++struct ip_set;
++
++/*
++ * The ip_set_type definition - one per set type, e.g. "ipmap".
++ *
++ * Each individual set has a pointer, set->type, going to one
++ * of these structures. Function pointers inside the structure implement
++ * the real behaviour of the sets.
++ *
++ * Unless stated otherwise, the implementation behind the function
++ * pointers of a set_type is expected to return 0 if ok, and a negative
++ * errno (e.g. -EINVAL) on error.
++ */
++struct ip_set_type {
++ struct list_head list; /* next in list of set types */
++
++ /* test for IP in set (kernel: iptables -m set src|dst)
++ * return 0 if not in set, 1 if in set.
++ */
++ int (*testip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* test for IP in set (userspace: ipset -T set IP)
++ * return 0 if not in set, 1 if in set.
++ */
++ int (*testip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /*
++ * Size of the data structure passed in when
++ * adding/deleting/testing an entry.
++ */
++ size_t reqsize;
++
++ /* Add IP into set (userspace: ipset -A set IP)
++ * Return -EEXIST if the address is already in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address was not already in the set, 0 is returned.
++ */
++ int (*addip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
++ * Return -EEXIST if the address is already in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address was not already in the set, 0 is returned.
++ */
++ int (*addip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* remove IP from set (userspace: ipset -D set --entry x)
++ * Return -EEXIST if the address is NOT in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address really was in the set, 0 is returned.
++ */
++ int (*delip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /* remove IP from set (kernel: iptables ... -j SET --entry x)
++ * Return -EEXIST if the address is NOT in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address really was in the set, 0 is returned.
++ */
++ int (*delip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* new set creation - allocate type specific items
++ */
++ int (*create) (struct ip_set *set,
++ const void *data, size_t size);
++
++ /* retry the operation after successfully tweaking the set
++ */
++ int (*retry) (struct ip_set *set);
++
++ /* set destruction - free type specific items
++ * There is no return value.
++ * Can be called only when child sets are destroyed.
++ */
++ void (*destroy) (struct ip_set *set);
++
++ /* set flushing - reset all bits in the set, or something similar.
++ * There is no return value.
++ */
++ void (*flush) (struct ip_set *set);
++
++ /* Listing: size needed for header
++ */
++ size_t header_size;
++
++ /* Listing: Get the header
++ *
++ * Fill in the information in "data".
++ * This function is always run after list_header_size() under a
++ * writelock on the set. Therefore the length of "data" is always
++ * correct.
++ */
++ void (*list_header) (const struct ip_set *set,
++ void *data);
++
++ /* Listing: Get the size for the set members
++ */
++ int (*list_members_size) (const struct ip_set *set);
++
++ /* Listing: Get the set members
++ *
++ * Fill in the information in "data".
++ * This function is always run after list_member_size() under a
++ * writelock on the set. Therefore the length of "data" is always
++ * correct.
++ */
++ void (*list_members) (const struct ip_set *set,
++ void *data);
++
++ char typename[IP_SET_MAXNAMELEN];
++ unsigned char features;
++ int protocol_version;
++
++ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
++ struct module *me;
++};
++
++extern int ip_set_register_set_type(struct ip_set_type *set_type);
++extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
++
++/* A generic ipset */
++struct ip_set {
++ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
++ rwlock_t lock; /* lock for concurrency control */
++ ip_set_id_t id; /* set id for swapping */
++ ip_set_id_t binding; /* default binding for the set */
++ atomic_t ref; /* in kernel and in hash references */
++ struct ip_set_type *type; /* the set types */
++ void *data; /* pooltype specific data */
++};
++
++/* Structure to bind set elements to sets */
++struct ip_set_hash {
++ struct list_head list; /* list of clashing entries in hash */
++ ip_set_ip_t ip; /* ip from set */
++ ip_set_id_t id; /* set id */
++ ip_set_id_t binding; /* set we bind the element to */
++};
++
++/* register and unregister set references */
++extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
++extern void ip_set_put(ip_set_id_t id);
++
++/* API for iptables set match, and SET target */
++extern void ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern void ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_testip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_H*/
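
For illustration only (this block is not part of the patch): the request structures above are driven from userspace through the SO_IP_SET socket option. The sketch below asks the running kernel for its ipset protocol version; the raw IPv4 socket, the IPPROTO_IP level, the getsockopt direction and the include path mirror how the ipset 2.x command-line tool appears to issue such requests and should be treated as assumptions.

    /* Hypothetical userspace sketch: ask the kernel for IP_SET_PROTOCOL_VERSION
     * via the SO_IP_SET sockopt. Socket type, level and include path are
     * assumptions modelled on the ipset 2.x tool. Needs CAP_NET_RAW/root. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/netfilter_ipv4/ip_set.h>

    int main(void)
    {
    	struct ip_set_req_version req;
    	socklen_t size = sizeof(req);
    	int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

    	if (sockfd < 0) {
    		perror("socket");
    		return 1;
    	}
    	memset(&req, 0, sizeof(req));
    	req.op = IP_SET_OP_VERSION;
    	/* "Get" style operations go through getsockopt(): the request is
    	 * copied in and the kernel overwrites it with the reply. */
    	if (getsockopt(sockfd, IPPROTO_IP /* == SOL_IP */, SO_IP_SET,
    	               &req, &size) < 0) {
    		perror("getsockopt(SO_IP_SET)");
    		return 1;
    	}
    	printf("kernel ipset protocol version: %u\n", req.version);
    	return 0;
    }
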
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_iphash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,30 @@
++#ifndef __IP_SET_IPHASH_H
++#define __IP_SET_IPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iphash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iphash {
++ ip_set_ip_t *members; /* the iphash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t netmask; /* netmask */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_iphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t netmask;
++};
++
++struct ip_set_req_iphash {
++ ip_set_ip_t ip;
++};
++
++#endif /* __IP_SET_IPHASH_H */
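
A rough userspace model (not the kernel data layout) of what the fields above imply: each of the "probes" attempts hashes the address with its own initval via jhash_1word() and checks one slot. The flat array and the zero-means-empty convention below are simplifications, since the module actually stores members through the harray helpers in ip_set_malloc.h and grows by "resize" percent when all probes collide.

    /* Simplified model of a multi-probe iphash lookup; illustration only. */
    #include <stdio.h>
    #include <stdint.h>
    #include <linux/types.h>
    #include <linux/netfilter_ipv4/ip_set_jhash.h>

    #define MODEL_HASHSIZE 1024
    #define MODEL_PROBES   4

    static uint32_t members[MODEL_HASHSIZE];  /* 0 == empty slot (model only) */
    static uint32_t initval[MODEL_PROBES] = { /* per-probe seeds, normally randomized */
    	0x12345678, 0x9abcdef0, 0x0fedcba9, 0x87654321
    };

    static int model_add(uint32_t ip)
    {
    	unsigned int i;

    	for (i = 0; i < MODEL_PROBES; i++) {
    		uint32_t slot = jhash_1word(ip, initval[i]) % MODEL_HASHSIZE;

    		if (members[slot] == 0 || members[slot] == ip) {
    			members[slot] = ip;
    			return 0;
    		}
    	}
    	return -1;	/* every probed slot taken; the module would resize */
    }

    static int model_test(uint32_t ip)
    {
    	unsigned int i;

    	for (i = 0; i < MODEL_PROBES; i++) {
    		uint32_t slot = jhash_1word(ip, initval[i]) % MODEL_HASHSIZE;

    		if (members[slot] == ip)
    			return 1;
    	}
    	return 0;
    }

    int main(void)
    {
    	uint32_t ip = 0xC0A80001;	/* 192.168.0.1, host byte order */

    	model_add(ip);
    	printf("member: %d\n", model_test(ip));
    	return 0;
    }
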
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_ipmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,56 @@
++#ifndef __IP_SET_IPMAP_H
++#define __IP_SET_IPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipmap"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_ipmap {
++ void *members; /* the ipmap proper */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ ip_set_ip_t netmask; /* subnet netmask */
++ ip_set_ip_t sizeid; /* size of set in IPs */
++ ip_set_ip_t hosts; /* number of hosts in a subnet */
++};
++
++struct ip_set_req_ipmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++ ip_set_ip_t netmask;
++};
++
++struct ip_set_req_ipmap {
++ ip_set_ip_t ip;
++};
++
++static inline unsigned int
++mask_to_bits(ip_set_ip_t mask)
++{
++ unsigned int bits = 32;
++ ip_set_ip_t maskaddr;
++
++ if (mask == 0xFFFFFFFF)
++ return bits;
++
++ maskaddr = 0xFFFFFFFE;
++ /* bits is unsigned: test "> 0" so a malformed mask cannot loop forever */
++ while (--bits > 0 && maskaddr != mask)
++ maskaddr <<= 1;
++
++ return bits;
++}
++
++static inline ip_set_ip_t
++range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
++{
++ ip_set_ip_t mask = 0xFFFFFFFE;
++
++ *bits = 32;
++ /* *bits is unsigned: test "> 0" instead of the always-true ">= 0" */
++ while (--(*bits) > 0 && mask && (to & mask) != from)
++ mask <<= 1;
++
++ return mask;
++}
++
++#endif /* __IP_SET_IPMAP_H */
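
A brief, hedged usage example of the two helpers above; the numeric expectations in the comments were worked out by hand from the code, and the include path is an assumption about where the patch installs the header.

    /* Illustration only: derive the covering netmask for 192.168.0.0-192.168.0.255
     * and convert a netmask back to a prefix length. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <linux/netfilter_ipv4/ip_set_ipmap.h>

    int main(void)
    {
    	ip_set_ip_t from = 0xC0A80000;	/* 192.168.0.0, host byte order */
    	ip_set_ip_t to   = 0xC0A800FF;	/* 192.168.0.255 */
    	unsigned int bits;
    	ip_set_ip_t mask = range_to_mask(from, to, &bits);

    	printf("range mask: 0x%08X (/%u)\n", (unsigned) mask, bits);		/* 0xFFFFFF00, /24 */
    	printf("mask_to_bits(0xFFFFFF00) = %u\n", mask_to_bits(0xFFFFFF00));	/* 24 */
    	return 0;
    }
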
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_ipporthash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,34 @@
++#ifndef __IP_SET_IPPORTHASH_H
++#define __IP_SET_IPPORTHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipporthash"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT (MAX_RANGE + 1)
++
++struct ip_set_ipporthash {
++ ip_set_ip_t *members; /* the ipporthash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipporthash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipporthash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_IPPORTHASH_H */
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_iptree.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREE_H
++#define __IP_SET_IPTREE_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptree"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iptreed {
++ unsigned long expires[256]; /* x.x.x.ADDR */
++};
++
++struct ip_set_iptreec {
++ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
++};
++
++struct ip_set_iptreeb {
++ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
++};
++
++struct ip_set_iptree {
++ unsigned int timeout;
++ unsigned int gc_interval;
++#ifdef __KERNEL__
++ uint32_t elements; /* number of elements */
++ struct timer_list gc;
++ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
++#endif
++};
++
++struct ip_set_req_iptree_create {
++ unsigned int timeout;
++};
++
++struct ip_set_req_iptree {
++ ip_set_ip_t ip;
++ unsigned int timeout;
++};
++
++#endif /* __IP_SET_IPTREE_H */
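
To make the nesting above concrete: an address a.b.c.d is stored by walking tree[a], then tree[b], then tree[c], and finally expires[d] in the leaf, where a non-zero (and not yet expired) value marks membership. Below is a userspace model of that walk with stand-in struct names; the kernel-only fields (gc timer, element counter) and the timeout bookkeeping are left out.

    /* Userspace model of the iptree lookup path; illustration only. */
    #include <stdio.h>
    #include <stdint.h>

    struct model_d { unsigned long expires[256]; };   /* x.x.x.ADDR */
    struct model_c { struct model_d *tree[256]; };    /* x.x.ADDR.* */
    struct model_b { struct model_c *tree[256]; };    /* x.ADDR.*.* */
    struct model_t { struct model_b *tree[256]; };    /* ADDR.*.*.* */

    static int model_test(const struct model_t *set, uint32_t ip)
    {
    	unsigned char a = ip >> 24, b = ip >> 16, c = ip >> 8, d = ip;
    	const struct model_b *bt;
    	const struct model_c *ct;
    	const struct model_d *dt;

    	/* A missing branch at any level means the address was never added. */
    	if (!(bt = set->tree[a]) || !(ct = bt->tree[b]) || !(dt = ct->tree[c]))
    		return 0;
    	return dt->expires[d] != 0;	/* the module also compares against jiffies */
    }

    int main(void)
    {
    	static struct model_t root;	/* zero-initialized: empty tree */

    	printf("10.0.0.1 in empty tree: %d\n", model_test(&root, 0x0A000001));
    	return 0;
    }
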
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_iptreemap.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_iptreemap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREEMAP_H
++#define __IP_SET_IPTREEMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptreemap"
++
++#ifdef __KERNEL__
++struct ip_set_iptreemap_d {
++ unsigned char bitmap[32]; /* x.x.x.y */
++};
++
++struct ip_set_iptreemap_c {
++ struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
++};
++
++struct ip_set_iptreemap_b {
++ struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
++ unsigned char dirty[32];
++};
++#endif
++
++struct ip_set_iptreemap {
++ unsigned int gc_interval;
++#ifdef __KERNEL__
++ struct timer_list gc;
++ struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
++#endif
++};
++
++struct ip_set_req_iptreemap_create {
++ unsigned int gc_interval;
++};
++
++struct ip_set_req_iptreemap {
++ ip_set_ip_t start;
++ ip_set_ip_t end;
++};
++
++#endif /* __IP_SET_IPTREEMAP_H */
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_jhash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,148 @@
++#ifndef _LINUX_IPSET_JHASH_H
++#define _LINUX_IPSET_JHASH_H
++
++/* This is a copy of linux/jhash.h but the types u32/u8 are changed
++ * to __u32/__u8 so that the header file can be included into
++ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
++ */
++
++/* jhash.h: Jenkins hash support.
++ *
++ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ *
++ * http://burtleburtle.net/bob/hash/
++ *
++ * These are the credits from Bob's sources:
++ *
++ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
++ * hash(), hash2(), hash3, and mix() are externally useful functions.
++ * Routines to test the hash are included if SELF_TEST is defined.
++ * You can use this free for any purpose. It has no warranty.
++ *
++ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ *
++ * I've modified Bob's hash to be useful in the Linux kernel, and
++ * any bugs present are surely my fault. -DaveM
++ */
++
++/* NOTE: Arguments are modified. */
++#define __jhash_mix(a, b, c) \
++{ \
++ a -= b; a -= c; a ^= (c>>13); \
++ b -= c; b -= a; b ^= (a<<8); \
++ c -= a; c -= b; c ^= (b>>13); \
++ a -= b; a -= c; a ^= (c>>12); \
++ b -= c; b -= a; b ^= (a<<16); \
++ c -= a; c -= b; c ^= (b>>5); \
++ a -= b; a -= c; a ^= (c>>3); \
++ b -= c; b -= a; b ^= (a<<10); \
++ c -= a; c -= b; c ^= (b>>15); \
++}
++
++/* The golden ratio: an arbitrary value */
++#define JHASH_GOLDEN_RATIO 0x9e3779b9
++
++/* The most generic version, hashes an arbitrary sequence
++ * of bytes. No alignment or length assumptions are made about
++ * the input key.
++ */
++static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++{
++ __u32 a, b, c, len;
++ __u8 *k = key;
++
++ len = length;
++ a = b = JHASH_GOLDEN_RATIO;
++ c = initval;
++
++ while (len >= 12) {
++ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
++ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
++ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
++
++ __jhash_mix(a,b,c);
++
++ k += 12;
++ len -= 12;
++ }
++
++ c += length;
++ switch (len) {
++ case 11: c += ((__u32)k[10]<<24);
++ case 10: c += ((__u32)k[9]<<16);
++ case 9 : c += ((__u32)k[8]<<8);
++ case 8 : b += ((__u32)k[7]<<24);
++ case 7 : b += ((__u32)k[6]<<16);
++ case 6 : b += ((__u32)k[5]<<8);
++ case 5 : b += k[4];
++ case 4 : a += ((__u32)k[3]<<24);
++ case 3 : a += ((__u32)k[2]<<16);
++ case 2 : a += ((__u32)k[1]<<8);
++ case 1 : a += k[0];
++ };
++
++ __jhash_mix(a,b,c);
++
++ return c;
++}
++
++/* A special optimized version that handles 1 or more __u32s.
++ * The length parameter here is the number of __u32s in the key.
++ */
++static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++{
++ __u32 a, b, c, len;
++
++ a = b = JHASH_GOLDEN_RATIO;
++ c = initval;
++ len = length;
++
++ while (len >= 3) {
++ a += k[0];
++ b += k[1];
++ c += k[2];
++ __jhash_mix(a, b, c);
++ k += 3; len -= 3;
++ }
++
++ c += length * 4;
++
++ switch (len) {
++ case 2 : b += k[1];
++ case 1 : a += k[0];
++ };
++
++ __jhash_mix(a,b,c);
++
++ return c;
++}
++
++
++/* Special ultra-optimized versions that know they are hashing exactly
++ * 3, 2 or 1 word(s).
++ *
++ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
++ * done at the end is not done here.
++ */
++static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++{
++ a += JHASH_GOLDEN_RATIO;
++ b += JHASH_GOLDEN_RATIO;
++ c += initval;
++
++ __jhash_mix(a, b, c);
++
++ return c;
++}
++
++static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++{
++ return jhash_3words(a, b, 0, initval);
++}
++
++static inline __u32 jhash_1word(__u32 a, __u32 initval)
++{
++ return jhash_3words(a, 0, 0, initval);
++}
++
++#endif /* _LINUX_IPSET_JHASH_H */
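
A small, hedged usage note: ip_set.c (further down in this patch) keys its binding hash with jhash_2words(id, ip, seed) modulo the bindings hash size, and the hash set types seed jhash_1word per probe. The fragment below just exercises the header from userspace; the seed and sizes are made-up values.

    /* Illustration only: compute a bucket index the same way the binding hash
     * in ip_set.c does (jhash_2words(id, ip, random_seed) % hash_size). */
    #include <stdio.h>
    #include <linux/types.h>
    #include <linux/netfilter_ipv4/ip_set_jhash.h>

    int main(void)
    {
    	__u32 seed = 0xdeadbeef;	/* stand-in for the module's random seed */
    	__u32 id = 7;			/* some set id */
    	__u32 ip = 0xC0A80001;		/* 192.168.0.1, host byte order */
    	__u32 hash_size = 1024;

    	printf("bucket = %u\n", (unsigned)(jhash_2words(id, ip, seed) % hash_size));
    	return 0;
    }
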
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_macipmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,38 @@
++#ifndef __IP_SET_MACIPMAP_H
++#define __IP_SET_MACIPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "macipmap"
++#define MAX_RANGE 0x0000FFFF
++
++/* general flags */
++#define IPSET_MACIP_MATCHUNSET 1
++
++/* per ip flags */
++#define IPSET_MACIP_ISSET 1
++
++struct ip_set_macipmap {
++ void *members; /* the macipmap proper */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t flags;
++};
++
++struct ip_set_req_macipmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++ u_int32_t flags;
++};
++
++struct ip_set_req_macipmap {
++ ip_set_ip_t ip;
++ unsigned char ethernet[ETH_ALEN];
++};
++
++struct ip_set_macip {
++ unsigned short flags;
++ unsigned char ethernet[ETH_ALEN];
++};
++
++#endif /* __IP_SET_MACIPMAP_H */
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_malloc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,116 @@
++#ifndef _IP_SET_MALLOC_H
++#define _IP_SET_MALLOC_H
++
++#ifdef __KERNEL__
++
++/* Memory allocation and deallocation */
++static size_t max_malloc_size = 0;
++
++static inline void init_max_malloc_size(void)
++{
++#define CACHE(x) max_malloc_size = x;
++#include <linux/kmalloc_sizes.h>
++#undef CACHE
++}
++
++static inline void * ip_set_malloc(size_t bytes)
++{
++ if (bytes > max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ if (bytes > max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
++struct harray {
++ size_t max_elements;
++ void *arrays[0];
++};
++
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, int flags)
++{
++ struct harray *harray;
++ size_t max_elements, size, i, j;
++
++ if (!max_malloc_size)
++ init_max_malloc_size();
++
++ if (typesize > max_malloc_size)
++ return NULL;
++
++ max_elements = max_malloc_size/typesize;
++ size = hashsize/max_elements;
++ if (hashsize % max_elements)
++ size++;
++
++ /* Last pointer signals end of arrays */
++ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
++ flags);
++
++ if (!harray)
++ return NULL;
++
++ for (i = 0; i < size - 1; i++) {
++ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
++ if (!harray->arrays[i])
++ goto undo;
++ memset(harray->arrays[i], 0, max_elements * typesize);
++ }
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ flags);
++ if (!harray->arrays[i])
++ goto undo;
++ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
++
++ harray->max_elements = max_elements;
++ harray->arrays[size] = NULL;
++
++ return (void *)harray;
++
++ undo:
++ for (j = 0; j < i; j++) {
++ kfree(harray->arrays[j]);
++ }
++ kfree(harray);
++ return NULL;
++}
++
++static inline void harray_free(void *h)
++{
++ struct harray *harray = (struct harray *) h;
++ size_t i;
++
++ for (i = 0; harray->arrays[i] != NULL; i++)
++ kfree(harray->arrays[i]);
++ kfree(harray);
++}
++
++static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
++{
++ struct harray *harray = (struct harray *) h;
++ size_t i;
++
++ for (i = 0; harray->arrays[i+1] != NULL; i++)
++ memset(harray->arrays[i], 0, harray->max_elements * typesize);
++ memset(harray->arrays[i], 0,
++ (hashsize - i * harray->max_elements) * typesize);
++}
++
++#define HARRAY_ELEM(h, type, which) \
++({ \
++ struct harray *__h = (struct harray *)(h); \
++ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
++ + (which)%(__h)->max_elements); \
++})
++
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_MALLOC_H*/
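
For readers skimming the harray helper above: a hash of hashsize entries is split into kmalloc-able chunks of at most max_elements entries each, and HARRAY_ELEM() locates element "which" as chunk which / max_elements, offset which % max_elements. A tiny standalone illustration of that arithmetic (the numbers are arbitrary):

    /* Illustration of the HARRAY_ELEM() index split; not kernel code. */
    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
    	size_t max_elements = 8192;	/* e.g. max_malloc_size / typesize */
    	size_t which = 20000;		/* logical element index */

    	printf("element %zu -> chunk %zu, offset %zu\n",
    	       which, which / max_elements, which % max_elements);	/* chunk 2, offset 3616 */
    	return 0;
    }
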
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_nethash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef __IP_SET_NETHASH_H
++#define __IP_SET_NETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "nethash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_nethash {
++ ip_set_ip_t *members; /* the nethash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ unsigned char cidr[30]; /* CIDR sizes */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_nethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++};
++
++struct ip_set_req_nethash {
++ ip_set_ip_t ip;
++ unsigned char cidr;
++};
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++#endif /* __IP_SET_NETHASH_H */
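
pack() above masks the address to its network part and then encodes the prefix length into the first host byte via the shifts[] table, so the same network packed with different CIDR sizes appears to yield distinct hash keys. A hedged userspace check follows; the expected values in the comments were worked out by hand and the include paths are assumptions.

    /* Illustration only: pack the same network with two prefix lengths. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <arpa/inet.h>	/* htonl()/ntohl() used by pack() */
    #include <linux/netfilter_ipv4/ip_set_nethash.h>

    int main(void)
    {
    	ip_set_ip_t net = 0xC0A80000;	/* 192.168.0.0, host byte order */

    	printf("pack(192.168.0.0, 24) = 0x%08X\n", (unsigned) pack(net, 24));	/* 0xC0A800FF */
    	printf("pack(192.168.0.0, 25) = 0x%08X\n", (unsigned) pack(net, 25));	/* 0xC0A800FD */
    	return 0;
    }
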
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ip_set_portmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,25 @@
++#ifndef __IP_SET_PORTMAP_H
++#define __IP_SET_PORTMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "portmap"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT (MAX_RANGE + 1)
++
++struct ip_set_portmap {
++ void *members; /* the portmap proper */
++ ip_set_ip_t first_port; /* host byte order, included in range */
++ ip_set_ip_t last_port; /* host byte order, included in range */
++};
++
++struct ip_set_req_portmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_portmap {
++ ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_PORTMAP_H */
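
The portmap set is essentially a bitmap over [first_port, last_port]; bitmap_bytes() from ip_set.h sizes it. The model below shows the bit arithmetic in plain userspace C; the kernel module would more likely use set_bit()/test_bit() on map->members rather than manual shifts, so treat this as an approximation.

    /* Userspace model of a portmap-style bitmap; illustration only. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <linux/netfilter_ipv4/ip_set_portmap.h>	/* pulls in bitmap_bytes() */

    int main(void)
    {
    	ip_set_ip_t first = 1024, last = 2047, port = 1080;
    	int bytes = bitmap_bytes(first, last);	/* 128 bytes for 1024 ports */
    	unsigned char *members = calloc(1, bytes);

    	if (!members)
    		return 1;
    	members[(port - first) / 8] |= 1 << ((port - first) % 8);	/* add 1080 */
    	printf("port %u set: %d\n", (unsigned) port,
    	       !!(members[(port - first) / 8] & (1 << ((port - first) % 8))));
    	free(members);
    	return 0;
    }
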
+diff -Nru ./linux-2.6.22.4/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h
+--- ./linux-2.6.22.4/include/linux/netfilter_ipv4/ipt_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,21 @@
++#ifndef _IPT_SET_H
++#define _IPT_SET_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++struct ipt_set_info {
++ ip_set_id_t index;
++ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
++};
++
++/* match info */
++struct ipt_set_info_match {
++ struct ipt_set_info match_set;
++};
++
++struct ipt_set_info_target {
++ struct ipt_set_info add_set;
++ struct ipt_set_info del_set;
++};
++
++#endif /*_IPT_SET_H*/
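
The flags[] array above is walked by the kernel side (ip_set_testip_kernel() and friends, later in this patch): slot 0 names the direction used for the set itself, the following slots the directions used while following bindings, and a zero slot ends the walk. The sketch below shows how a userspace iptables extension might fill the match info for something like "-m set --set myset src,dst"; the option syntax and the name-to-index resolution are recalled from the companion iptables extensions and should be treated as assumptions.

    /* Hypothetical illustration of filling struct ipt_set_info_match. */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <linux/netfilter_ipv4/ipt_set.h>

    int main(void)
    {
    	struct ipt_set_info_match info;

    	memset(&info, 0, sizeof(info));
    	info.match_set.index = 3;		/* would come from IP_SET_OP_GET_BYNAME */
    	info.match_set.flags[0] = IPSET_SRC;	/* match the source address in the set */
    	info.match_set.flags[1] = IPSET_DST;	/* then follow the binding with the dst */
    	info.match_set.flags[2] = 0;		/* terminator for the kernel-side walk */
    	/* ORing IPSET_MATCH_INV into flags[0] would invert the match. */

    	printf("index=%u flags[0]=0x%x flags[1]=0x%x\n",
    	       (unsigned) info.match_set.index,
    	       (unsigned) info.match_set.flags[0],
    	       (unsigned) info.match_set.flags[1]);
    	return 0;
    }
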
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,2003 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module for IP set management */
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kmod.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <asm/semaphore.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/ip_set.h>
++
++static struct list_head set_type_list; /* all registered sets */
++static struct ip_set **ip_set_list; /* all individual sets */
++static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
++static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
++static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
++static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
++static struct list_head *ip_set_hash; /* hash of bindings */
++static unsigned int ip_set_hash_random; /* random seed */
++
++/*
++ * Sets are identified either by the index in ip_set_list or by id.
++ * The id never changes and is used to find a key in the hash.
++ * The index may change by swapping and is used at all other places
++ * (set/SET netfilter modules, binding value, etc.)
++ *
++ * Userspace requests are serialized by ip_set_app_mutex and sets can
++ * be deleted only from userspace. Therefore ip_set_list locking
++ * must obey the following rules:
++ *
++ * - kernel requests: read and write locking mandatory
++ * - user requests: read locking optional, write locking mandatory
++ */
++
++static inline void
++__ip_set_get(ip_set_id_t index)
++{
++ atomic_inc(&ip_set_list[index]->ref);
++}
++
++static inline void
++__ip_set_put(ip_set_id_t index)
++{
++ atomic_dec(&ip_set_list[index]->ref);
++}
++
++/*
++ * Binding routines
++ */
++
++static inline struct ip_set_hash *
++__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
++{
++ struct ip_set_hash *set_hash;
++
++ list_for_each_entry(set_hash, &ip_set_hash[key], list)
++ if (set_hash->id == id && set_hash->ip == ip)
++ return set_hash;
++
++ return NULL;
++}
++
++static ip_set_id_t
++ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++
++ ASSERT_READ_LOCK(&ip_set_lock);
++ IP_SET_ASSERT(ip_set_list[id]);
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
++ set_hash = __ip_set_find(key, id, ip);
++
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip),
++ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
++}
++
++static inline void
++__set_hash_del(struct ip_set_hash *set_hash)
++{
++ ASSERT_WRITE_LOCK(&ip_set_lock);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++
++ __ip_set_put(set_hash->binding);
++ list_del(&set_hash->list);
++ kfree(set_hash);
++}
++
++static int
++ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++
++ IP_SET_ASSERT(ip_set_list[id]);
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ write_lock_bh(&ip_set_lock);
++ set_hash = __ip_set_find(key, id, ip);
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip),
++ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++ if (set_hash != NULL)
++ __set_hash_del(set_hash);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++}
++
++static int
++ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++ int ret = 0;
++
++ IP_SET_ASSERT(ip_set_list[id]);
++ IP_SET_ASSERT(ip_set_list[binding]);
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip), ip_set_list[binding]->name);
++ write_lock_bh(&ip_set_lock);
++ set_hash = __ip_set_find(key, id, ip);
++ if (!set_hash) {
++ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
++ if (!set_hash) {
++ ret = -ENOMEM;
++ goto unlock;
++ }
++ INIT_LIST_HEAD(&set_hash->list);
++ set_hash->id = id;
++ set_hash->ip = ip;
++ list_add(&set_hash->list, &ip_set_hash[key]);
++ } else {
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ DP("overwrite binding: %s",
++ ip_set_list[set_hash->binding]->name);
++ __ip_set_put(set_hash->binding);
++ }
++ set_hash->binding = binding;
++ __ip_set_get(set_hash->binding);
++ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
++ key, id, ip_set_list[id]->name,
++ HIPQUAD(ip), binding, ip_set_list[binding]->name);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return ret;
++}
++
++#define FOREACH_HASH_DO(fn, args...) \
++({ \
++ ip_set_id_t __key; \
++ struct ip_set_hash *__set_hash; \
++ \
++ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
++ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
++ fn(__set_hash , ## args); \
++ } \
++})
++
++#define FOREACH_HASH_RW_DO(fn, args...) \
++({ \
++ ip_set_id_t __key; \
++ struct ip_set_hash *__set_hash, *__n; \
++ \
++ ASSERT_WRITE_LOCK(&ip_set_lock); \
++ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
++ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
++ fn(__set_hash , ## args); \
++ } \
++})
++
++/* Add, del and test set entries from kernel */
++
++#define follow_bindings(index, set, ip) \
++((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
++ || (index = (set)->binding) != IP_SET_INVALID_ID)
++
++int
++ip_set_testip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ read_lock_bh(&set->lock);
++ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
++ read_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while (res > 0
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++
++ return res;
++}
++
++void
++ip_set_addip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ retry:
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ write_lock_bh(&set->lock);
++ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
++ write_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while ((res == 0 || res == -EEXIST)
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++
++ if (res == -EAGAIN
++ && set->type->retry
++ && (res = set->type->retry(set)) == 0)
++ goto retry;
++}
++
++void
++ip_set_delip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ write_lock_bh(&set->lock);
++ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
++ write_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while ((res == 0 || res == -EEXIST)
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++}
++
++/* Register and deregister settype */
++
++static inline struct ip_set_type *
++find_set_type(const char *name)
++{
++ struct ip_set_type *set_type;
++
++ list_for_each_entry(set_type, &set_type_list, list)
++ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
++ return set_type;
++ return NULL;
++}
++
++int
++ip_set_register_set_type(struct ip_set_type *set_type)
++{
++ int ret = 0;
++
++ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
++ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
++ set_type->typename,
++ set_type->protocol_version,
++ IP_SET_PROTOCOL_VERSION);
++ return -EINVAL;
++ }
++
++ write_lock_bh(&ip_set_lock);
++ if (find_set_type(set_type->typename)) {
++ /* Duplicate! */
++ ip_set_printk("'%s' already registered!",
++ set_type->typename);
++ ret = -EINVAL;
++ goto unlock;
++ }
++ if (!try_module_get(THIS_MODULE)) {
++ ret = -EFAULT;
++ goto unlock;
++ }
++ list_add(&set_type->list, &set_type_list);
++ DP("'%s' registered.", set_type->typename);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return ret;
++}
++
++void
++ip_set_unregister_set_type(struct ip_set_type *set_type)
++{
++ write_lock_bh(&ip_set_lock);
++ if (!find_set_type(set_type->typename)) {
++ ip_set_printk("'%s' not registered?",
++ set_type->typename);
++ goto unlock;
++ }
++ list_del(&set_type->list);
++ module_put(THIS_MODULE);
++ DP("'%s' unregistered.", set_type->typename);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++
++}
++
++/*
++ * Userspace routines
++ */
++
++/*
++ * Find set by name, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byname(const char *name)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ down(&ip_set_app_mutex);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strcmp(ip_set_list[i]->name, name) == 0) {
++ __ip_set_get(i);
++ index = i;
++ break;
++ }
++ }
++ up(&ip_set_app_mutex);
++ return index;
++}
++
++/*
++ * Find set by index, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byindex(ip_set_id_t index)
++{
++ down(&ip_set_app_mutex);
++
++ if (index >= ip_set_max) {
++ /* out of range: release the mutex taken above before returning */
++ up(&ip_set_app_mutex);
++ return IP_SET_INVALID_ID;
++ }
++
++ if (ip_set_list[index])
++ __ip_set_get(index);
++ else
++ index = IP_SET_INVALID_ID;
++
++ up(&ip_set_app_mutex);
++ return index;
++}
++
++/*
++ * If the given index points to a valid set, decrement its
++ * reference count by one. The caller shall not assume the index
++ * to be valid after calling this function.
++ */
++void ip_set_put(ip_set_id_t index)
++{
++ down(&ip_set_app_mutex);
++ if (ip_set_list[index])
++ __ip_set_put(index);
++ up(&ip_set_app_mutex);
++}
++
++/* Find a set by name or index */
++static ip_set_id_t
++ip_set_find_byname(const char *name)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strcmp(ip_set_list[i]->name, name) == 0) {
++ index = i;
++ break;
++ }
++ }
++ return index;
++}
++
++static ip_set_id_t
++ip_set_find_byindex(ip_set_id_t index)
++{
++ if (index >= ip_set_max || ip_set_list[index] == NULL)
++ index = IP_SET_INVALID_ID;
++
++ return index;
++}
++
++/*
++ * Add, del, test, bind and unbind
++ */
++
++static inline int
++__ip_set_testip(struct ip_set *set,
++ const void *data,
++ size_t size,
++ ip_set_ip_t *ip)
++{
++ int res;
++
++ read_lock_bh(&set->lock);
++ res = set->type->testip(set, data, size, ip);
++ read_unlock_bh(&set->lock);
++
++ return res;
++}
++
++static int
++__ip_set_addip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ do {
++ write_lock_bh(&set->lock);
++ res = set->type->addip(set, data, size, &ip);
++ write_unlock_bh(&set->lock);
++ } while (res == -EAGAIN
++ && set->type->retry
++ && (res = set->type->retry(set)) == 0);
++
++ return res;
++}
++
++static int
++ip_set_addip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++
++ return __ip_set_addip(index,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt));
++}
++
++static int
++ip_set_delip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ write_lock_bh(&set->lock);
++ res = set->type->delip(set,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt),
++ &ip);
++ write_unlock_bh(&set->lock);
++
++ return res;
++}
++
++static int
++ip_set_testip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt),
++ &ip);
++
++ return (res > 0 ? -EEXIST : res);
++}
++
++static int
++ip_set_bindip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_req_bind *req_bind;
++ ip_set_id_t binding;
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of a set */
++ char *binding_name;
++
++ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++ return -EINVAL;
++
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ binding = ip_set_find_byname(binding_name);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ write_lock_bh(&ip_set_lock);
++ /* Sets as binding values are referenced */
++ if (set->binding != IP_SET_INVALID_ID)
++ __ip_set_put(set->binding);
++ set->binding = binding;
++ __ip_set_get(set->binding);
++ write_unlock_bh(&ip_set_lock);
++
++ return 0;
++ }
++ binding = ip_set_find_byname(req_bind->binding);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++ DP("set %s, ip: %u.%u.%u.%u, binding %s",
++ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++
++ if (res >= 0)
++ res = ip_set_hash_add(set->id, ip, binding);
++
++ return res;
++}
++
++#define FOREACH_SET_DO(fn, args...) \
++({ \
++ ip_set_id_t __i; \
++ struct ip_set *__set; \
++ \
++ for (__i = 0; __i < ip_set_max; __i++) { \
++ __set = ip_set_list[__i]; \
++ if (__set != NULL) \
++ fn(__set , ##args); \
++ } \
++})
++
++static inline void
++__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
++{
++ if (set_hash->id == id)
++ __set_hash_del(set_hash);
++}
++
++static inline void
++__unbind_default(struct ip_set *set)
++{
++ if (set->binding != IP_SET_INVALID_ID) {
++ /* Sets as binding values are referenced */
++ __ip_set_put(set->binding);
++ set->binding = IP_SET_INVALID_ID;
++ }
++}
++
++static int
++ip_set_unbindip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set;
++ struct ip_set_req_bind *req_bind;
++ ip_set_ip_t ip;
++ int res;
++
++ DP("");
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ DP("%u %s", index, req_bind->binding);
++ if (index == IP_SET_INVALID_ID) {
++ /* unbind :all: */
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of sets */
++ write_lock_bh(&ip_set_lock);
++ FOREACH_SET_DO(__unbind_default);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all bindings of all sets*/
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ }
++ DP("unreachable reached!");
++ return -EINVAL;
++ }
++
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of set */
++ ip_set_id_t binding = ip_set_find_byindex(set->binding);
++
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ write_lock_bh(&ip_set_lock);
++ /* Sets in hash values are referenced */
++ __ip_set_put(set->binding);
++ set->binding = IP_SET_INVALID_ID;
++ write_unlock_bh(&ip_set_lock);
++
++ return 0;
++ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all bindings */
++
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ }
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++
++ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
++ if (res >= 0)
++ res = ip_set_hash_del(set->id, ip);
++
++ return res;
++}
++
++static int
++ip_set_testbind(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_req_bind *req_bind;
++ ip_set_id_t binding;
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of set */
++ char *binding_name;
++
++ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++ return -EINVAL;
++
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ binding = ip_set_find_byname(binding_name);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = (set->binding == binding) ? -EEXIST : 0;
++
++ return res;
++ }
++ binding = ip_set_find_byname(req_bind->binding);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++ DP("set %s, ip: %u.%u.%u.%u, binding %s",
++ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++
++ if (res >= 0)
++ res = (ip_set_find_in_hash(set->id, ip) == binding)
++ ? -EEXIST : 0;
++
++ return res;
++}
++
++static struct ip_set_type *
++find_set_type_rlock(const char *typename)
++{
++ struct ip_set_type *type;
++
++ read_lock_bh(&ip_set_lock);
++ type = find_set_type(typename);
++ if (type == NULL)
++ read_unlock_bh(&ip_set_lock);
++
++ return type;
++}
++
++static int
++find_free_id(const char *name,
++ ip_set_id_t *index,
++ ip_set_id_t *id)
++{
++ ip_set_id_t i;
++
++ *id = IP_SET_INVALID_ID;
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] == NULL) {
++ if (*id == IP_SET_INVALID_ID)
++ *id = *index = i;
++ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ /* Name clash */
++ return -EEXIST;
++ }
++ if (*id == IP_SET_INVALID_ID)
++ /* No free slot remained */
++ return -ERANGE;
++ /* Check that index is usable as id (swapping) */
++ check:
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && ip_set_list[i]->id == *id) {
++ *id = i;
++ goto check;
++ }
++ }
++ return 0;
++}
++
++/*
++ * Create a set
++ */
++static int
++ip_set_create(const char *name,
++ const char *typename,
++ ip_set_id_t restore,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set;
++ ip_set_id_t index = 0, id;
++ int res = 0;
++
++ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++ /*
++ * First, and without any locks, allocate and initialize
++ * a normal base set structure.
++ */
++ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
++ if (!set)
++ return -ENOMEM;
++ set->lock = RW_LOCK_UNLOCKED;
++ strncpy(set->name, name, IP_SET_MAXNAMELEN);
++ set->binding = IP_SET_INVALID_ID;
++ atomic_set(&set->ref, 0);
++
++ /*
++ * Next, take the &ip_set_lock, check that we know the type,
++ * and take a reference on the type, to make sure it
++ * stays available while constructing our new set.
++ *
++ * After referencing the type, we drop the &ip_set_lock,
++ * and let the new set construction run without locks.
++ */
++ set->type = find_set_type_rlock(typename);
++ if (set->type == NULL) {
++ /* Try loading the module */
++ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
++ strcpy(modulename, "ip_set_");
++ strcat(modulename, typename);
++ DP("try to load %s", modulename);
++ request_module(modulename);
++ set->type = find_set_type_rlock(typename);
++ }
++ if (set->type == NULL) {
++ ip_set_printk("no set type '%s', set '%s' not created",
++ typename, name);
++ res = -ENOENT;
++ goto out;
++ }
++ if (!try_module_get(set->type->me)) {
++ read_unlock_bh(&ip_set_lock);
++ res = -EFAULT;
++ goto out;
++ }
++ read_unlock_bh(&ip_set_lock);
++
++ /*
++ * Without holding any locks, create private part.
++ */
++ res = set->type->create(set, data, size);
++ if (res != 0)
++ goto put_out;
++
++ /* BTW, res==0 here. */
++
++ /*
++ * Here, we have a valid, constructed set. &ip_set_lock again,
++ * find free id/index and check that it is not already in
++ * ip_set_list.
++ */
++ write_lock_bh(&ip_set_lock);
++ if ((res = find_free_id(set->name, &index, &id)) != 0) {
++ DP("no free id!");
++ goto cleanup;
++ }
++
++ /* Make sure restore gets the same index */
++ if (restore != IP_SET_INVALID_ID && index != restore) {
++ DP("Can't restore, sets are screwed up");
++ res = -ERANGE;
++ goto cleanup;
++ }
++
++ /*
++ * Finally! Add our shiny new set to the list, and be done.
++ */
++ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
++ set->id = id;
++ ip_set_list[index] = set;
++ write_unlock_bh(&ip_set_lock);
++ return res;
++
++ cleanup:
++ write_unlock_bh(&ip_set_lock);
++ set->type->destroy(set);
++ put_out:
++ module_put(set->type->me);
++ out:
++ kfree(set);
++ return res;
++}
++
++/*
++ * Destroy a given existing set
++ */
++static void
++ip_set_destroy_set(ip_set_id_t index)
++{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
++ DP("set: %s", set->name);
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++ if (set->binding != IP_SET_INVALID_ID)
++ __ip_set_put(set->binding);
++ ip_set_list[index] = NULL;
++ write_unlock_bh(&ip_set_lock);
++
++ /* Must call it without holding any lock */
++ set->type->destroy(set);
++ module_put(set->type->me);
++ kfree(set);
++}
++
++/*
++ * Destroy a set - or all sets
++ * Sets must not be referenced/used.
++ */
++static int
++ip_set_destroy(ip_set_id_t index)
++{
++ ip_set_id_t i;
++
++ /* ref modification always protected by the mutex */
++ if (index != IP_SET_INVALID_ID) {
++ if (atomic_read(&ip_set_list[index]->ref))
++ return -EBUSY;
++ ip_set_destroy_set(index);
++ } else {
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && (atomic_read(&ip_set_list[i]->ref)))
++ return -EBUSY;
++ }
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL)
++ ip_set_destroy_set(i);
++ }
++ }
++ return 0;
++}
++
++static void
++ip_set_flush_set(struct ip_set *set)
++{
++ DP("set: %s %u", set->name, set->id);
++
++ write_lock_bh(&set->lock);
++ set->type->flush(set);
++ write_unlock_bh(&set->lock);
++}
++
++/*
++ * Flush data in a set - or in all sets
++ */
++static int
++ip_set_flush(ip_set_id_t index)
++{
++ if (index != IP_SET_INVALID_ID) {
++ IP_SET_ASSERT(ip_set_list[index]);
++ ip_set_flush_set(ip_set_list[index]);
++ } else
++ FOREACH_SET_DO(ip_set_flush_set);
++
++ return 0;
++}
++
++/* Rename a set */
++static int
++ip_set_rename(ip_set_id_t index, const char *name)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_id_t i;
++ int res = 0;
++
++ DP("set: %s to %s", set->name, name);
++ write_lock_bh(&ip_set_lock);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strncmp(ip_set_list[i]->name,
++ name,
++ IP_SET_MAXNAMELEN - 1) == 0) {
++ res = -EEXIST;
++ goto unlock;
++ }
++ }
++ strncpy(set->name, name, IP_SET_MAXNAMELEN);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return res;
++}
++
++/*
++ * Swap two sets so that name/index points to the other.
++ * References are also swapped.
++ */
++static int
++ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
++{
++ struct ip_set *from = ip_set_list[from_index];
++ struct ip_set *to = ip_set_list[to_index];
++ char from_name[IP_SET_MAXNAMELEN];
++ u_int32_t from_ref;
++
++ DP("set: %s to %s", from->name, to->name);
++	/* Features must not change. Artificial restriction. */
++ if (from->type->features != to->type->features)
++ return -ENOEXEC;
++
++ /* No magic here: ref munging protected by the mutex */
++ write_lock_bh(&ip_set_lock);
++ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
++ from_ref = atomic_read(&from->ref);
++
++ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
++ atomic_set(&from->ref, atomic_read(&to->ref));
++ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
++ atomic_set(&to->ref, from_ref);
++
++ ip_set_list[from_index] = to;
++ ip_set_list[to_index] = from;
++
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++}
++
++/*
++ * List set data
++ */
++
++static inline void
++__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
++ ip_set_id_t id, size_t *size)
++{
++ if (set_hash->id == id)
++ *size += sizeof(struct ip_set_hash_list);
++}
++
++static inline void
++__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
++ ip_set_id_t id, size_t *size)
++{
++ if (set_hash->id == id)
++ *size += sizeof(struct ip_set_hash_save);
++}
++
++static inline void
++__set_hash_bindings(struct ip_set_hash *set_hash,
++ ip_set_id_t id, void *data, int *used)
++{
++ if (set_hash->id == id) {
++ struct ip_set_hash_list *hash_list =
++ (struct ip_set_hash_list *)(data + *used);
++
++ hash_list->ip = set_hash->ip;
++ hash_list->binding = set_hash->binding;
++ *used += sizeof(struct ip_set_hash_list);
++ }
++}
++
++static int ip_set_list_set(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_list *set_list;
++
++ /* Pointer to our header */
++ set_list = (struct ip_set_list *) (data + *used);
++
++ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
++
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_list) > len)
++ goto not_enough_mem;
++ *used += sizeof(struct ip_set_list);
++
++ read_lock_bh(&set->lock);
++ /* Get and ensure set specific header size */
++ set_list->header_size = set->type->header_size;
++ if (*used + set_list->header_size > len)
++ goto unlock_set;
++
++ /* Fill in the header */
++ set_list->index = index;
++ set_list->binding = set->binding;
++ set_list->ref = atomic_read(&set->ref);
++
++	/* Fill in set specific header data */
++ set->type->list_header(set, data + *used);
++ *used += set_list->header_size;
++
++ /* Get and ensure set specific members size */
++ set_list->members_size = set->type->list_members_size(set);
++ if (*used + set_list->members_size > len)
++ goto unlock_set;
++
++	/* Fill in set specific members data */
++ set->type->list_members(set, data + *used);
++ *used += set_list->members_size;
++ read_unlock_bh(&set->lock);
++
++ /* Bindings */
++
++ /* Get and ensure set specific bindings size */
++ set_list->bindings_size = 0;
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ set->id, &set_list->bindings_size);
++ if (*used + set_list->bindings_size > len)
++ goto not_enough_mem;
++
++	/* Fill in set specific bindings data */
++ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
++
++ return 0;
++
++ unlock_set:
++ read_unlock_bh(&set->lock);
++ not_enough_mem:
++ DP("not enough mem, try again");
++ return -EAGAIN;
++}
++
++/*
++ * Save sets
++ */
++static int ip_set_save_set(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ struct ip_set *set;
++ struct ip_set_save *set_save;
++
++ /* Pointer to our header */
++ set_save = (struct ip_set_save *) (data + *used);
++
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_save) > len)
++ goto not_enough_mem;
++ *used += sizeof(struct ip_set_save);
++
++ set = ip_set_list[index];
++ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ data, data + *used);
++
++ read_lock_bh(&set->lock);
++ /* Get and ensure set specific header size */
++ set_save->header_size = set->type->header_size;
++ if (*used + set_save->header_size > len)
++ goto unlock_set;
++
++ /* Fill in the header */
++ set_save->index = index;
++ set_save->binding = set->binding;
++
++	/* Fill in set specific header data */
++ set->type->list_header(set, data + *used);
++ *used += set_save->header_size;
++
++ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
++ set_save->header_size, data, data + *used);
++ /* Get and ensure set specific members size */
++ set_save->members_size = set->type->list_members_size(set);
++ if (*used + set_save->members_size > len)
++ goto unlock_set;
++
++	/* Fill in set specific members data */
++ set->type->list_members(set, data + *used);
++ *used += set_save->members_size;
++ read_unlock_bh(&set->lock);
++ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
++ set_save->members_size, data, data + *used);
++ return 0;
++
++ unlock_set:
++ read_unlock_bh(&set->lock);
++ not_enough_mem:
++ DP("not enough mem, try again");
++ return -EAGAIN;
++}
++
++static inline void
++__set_hash_save_bindings(struct ip_set_hash *set_hash,
++ ip_set_id_t id,
++ void *data,
++ int *used,
++ int len,
++ int *res)
++{
++ if (*res == 0
++ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
++ struct ip_set_hash_save *hash_save =
++ (struct ip_set_hash_save *)(data + *used);
++ /* Ensure bindings size */
++ if (*used + sizeof(struct ip_set_hash_save) > len) {
++ *res = -ENOMEM;
++ return;
++ }
++ hash_save->id = set_hash->id;
++ hash_save->ip = set_hash->ip;
++ hash_save->binding = set_hash->binding;
++ *used += sizeof(struct ip_set_hash_save);
++ }
++}
++
++static int ip_set_save_bindings(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ int res = 0;
++ struct ip_set_save *set_save;
++
++ DP("used %u, len %u", *used, len);
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_save) > len)
++ return -ENOMEM;
++
++ /* Marker */
++ set_save = (struct ip_set_save *) (data + *used);
++ set_save->index = IP_SET_INVALID_ID;
++ set_save->header_size = 0;
++ set_save->members_size = 0;
++ *used += sizeof(struct ip_set_save);
++
++ DP("marker added used %u, len %u", *used, len);
++ /* Fill in bindings data */
++ if (index != IP_SET_INVALID_ID)
++ /* Sets are identified by id in hash */
++ index = ip_set_list[index]->id;
++ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
++
++ return res;
++}
++
++/*
++ * Restore sets
++ */
++static int ip_set_restore(void *data,
++ int len)
++{
++ int res = 0;
++ int line = 0, used = 0, members_size;
++ struct ip_set *set;
++ struct ip_set_hash_save *hash_save;
++ struct ip_set_restore *set_restore;
++ ip_set_id_t index;
++
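++	/*
++	 * Layout of the restore blob: a sequence of ip_set_restore
++	 * records, each followed by its set type header and member
++	 * entries, terminated by a marker record whose index is
++	 * IP_SET_INVALID_ID, after which the binding records
++	 * (struct ip_set_hash_save) follow until the end of the data.
++	 */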
++ /* Loop to restore sets */
++ while (1) {
++ line++;
++
++ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++ /* Get and ensure header size */
++ if (used + sizeof(struct ip_set_restore) > len)
++ return line;
++ set_restore = (struct ip_set_restore *) (data + used);
++ used += sizeof(struct ip_set_restore);
++
++ /* Ensure data size */
++ if (used
++ + set_restore->header_size
++ + set_restore->members_size > len)
++ return line;
++
++ /* Check marker */
++ if (set_restore->index == IP_SET_INVALID_ID) {
++ line--;
++ goto bindings;
++ }
++
++ /* Try to create the set */
++ DP("restore %s %s", set_restore->name, set_restore->typename);
++ res = ip_set_create(set_restore->name,
++ set_restore->typename,
++ set_restore->index,
++ data + used,
++ set_restore->header_size);
++
++ if (res != 0)
++ return line;
++ used += set_restore->header_size;
++
++ index = ip_set_find_byindex(set_restore->index);
++ DP("index %u, restore_index %u", index, set_restore->index);
++ if (index != set_restore->index)
++ return line;
++ /* Try to restore members data */
++ set = ip_set_list[index];
++ members_size = 0;
++ DP("members_size %u reqsize %u",
++ set_restore->members_size, set->type->reqsize);
++ while (members_size + set->type->reqsize <=
++ set_restore->members_size) {
++ line++;
++ DP("members: %u, line %u", members_size, line);
++ res = __ip_set_addip(index,
++ data + used + members_size,
++ set->type->reqsize);
++ if (!(res == 0 || res == -EEXIST))
++ return line;
++ members_size += set->type->reqsize;
++ }
++
++ DP("members_size %u %u",
++ set_restore->members_size, members_size);
++ if (members_size != set_restore->members_size)
++ return line++;
++ used += set_restore->members_size;
++ }
++
++ bindings:
++ /* Loop to restore bindings */
++ while (used < len) {
++ line++;
++
++ DP("restore binding, line %u", line);
++ /* Get and ensure size */
++ if (used + sizeof(struct ip_set_hash_save) > len)
++ return line;
++ hash_save = (struct ip_set_hash_save *) (data + used);
++ used += sizeof(struct ip_set_hash_save);
++
++ /* hash_save->id is used to store the index */
++ index = ip_set_find_byindex(hash_save->id);
++ DP("restore binding index %u, id %u, %u -> %u",
++ index, hash_save->id, hash_save->ip, hash_save->binding);
++ if (index != hash_save->id)
++ return line;
++ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
++ DP("corrupt binding set index %u", hash_save->binding);
++ return line;
++ }
++ set = ip_set_list[hash_save->id];
++ /* Null valued IP means default binding */
++ if (hash_save->ip)
++ res = ip_set_hash_add(set->id,
++ hash_save->ip,
++ hash_save->binding);
++ else {
++ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
++ write_lock_bh(&ip_set_lock);
++ set->binding = hash_save->binding;
++ __ip_set_get(set->binding);
++ write_unlock_bh(&ip_set_lock);
++ DP("default binding: %u", set->binding);
++ }
++ if (res != 0)
++ return line;
++ }
++ if (used != len)
++ return line;
++
++ return 0;
++}
++
++static int
++ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
++{
++ void *data;
++ int res = 0; /* Assume OK */
++ unsigned *op;
++ struct ip_set_req_adt *req_adt;
++ ip_set_id_t index = IP_SET_INVALID_ID;
++ int (*adtfn)(ip_set_id_t index,
++ const void *data, size_t size);
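++	/* Table indexed by *op - IP_SET_OP_ADD_IP below; the entry
++	 * order must therefore match the numbering of the ADD, DEL,
++	 * TEST, BIND, UNBIND and TEST_BIND operations. */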
++ struct fn_table {
++ int (*fn)(ip_set_id_t index,
++ const void *data, size_t size);
++ } adtfn_table[] =
++ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
++ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
++ };
++
++ DP("optval=%d, user=%p, len=%d", optval, user, len);
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (optval != SO_IP_SET)
++ return -EBADF;
++ if (len <= sizeof(unsigned)) {
++ ip_set_printk("short userdata (want >%zu, got %u)",
++ sizeof(unsigned), len);
++ return -EINVAL;
++ }
++ data = vmalloc(len);
++ if (!data) {
++ DP("out of mem for %u bytes", len);
++ return -ENOMEM;
++ }
++ if (copy_from_user(data, user, len) != 0) {
++ res = -EFAULT;
++ goto done;
++ }
++ if (down_interruptible(&ip_set_app_mutex)) {
++ res = -EINTR;
++ goto done;
++ }
++
++ op = (unsigned *)data;
++ DP("op=%x", *op);
++
++ if (*op < IP_SET_OP_VERSION) {
++ /* Check the version at the beginning of operations */
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++ res = -EPROTO;
++ goto done;
++ }
++ }
++
++ switch (*op) {
++ case IP_SET_OP_CREATE:{
++ struct ip_set_req_create *req_create
++ = (struct ip_set_req_create *) data;
++
++ if (len < sizeof(struct ip_set_req_create)) {
++ ip_set_printk("short CREATE data (want >=%zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++ res = ip_set_create(req_create->name,
++ req_create->typename,
++ IP_SET_INVALID_ID,
++ data + sizeof(struct ip_set_req_create),
++ len - sizeof(struct ip_set_req_create));
++ goto done;
++ }
++ case IP_SET_OP_DESTROY:{
++ struct ip_set_req_std *req_destroy
++ = (struct ip_set_req_std *) data;
++
++ if (len != sizeof(struct ip_set_req_std)) {
++ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
++ sizeof(struct ip_set_req_std), len);
++ res = -EINVAL;
++ goto done;
++ }
++ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ /* Destroy all sets */
++ index = IP_SET_INVALID_ID;
++ } else {
++ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_destroy->name);
++
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++
++ res = ip_set_destroy(index);
++ goto done;
++ }
++ case IP_SET_OP_FLUSH:{
++ struct ip_set_req_std *req_flush =
++ (struct ip_set_req_std *) data;
++
++ if (len != sizeof(struct ip_set_req_std)) {
++ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
++ sizeof(struct ip_set_req_std), len);
++ res = -EINVAL;
++ goto done;
++ }
++ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all sets */
++ index = IP_SET_INVALID_ID;
++ } else {
++ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_flush->name);
++
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ res = ip_set_flush(index);
++ goto done;
++ }
++ case IP_SET_OP_RENAME:{
++ struct ip_set_req_create *req_rename
++ = (struct ip_set_req_create *) data;
++
++ if (len != sizeof(struct ip_set_req_create)) {
++ ip_set_printk("invalid RENAME data (want %zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ index = ip_set_find_byname(req_rename->name);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ res = ip_set_rename(index, req_rename->typename);
++ goto done;
++ }
++ case IP_SET_OP_SWAP:{
++ struct ip_set_req_create *req_swap
++ = (struct ip_set_req_create *) data;
++ ip_set_id_t to_index;
++
++ if (len != sizeof(struct ip_set_req_create)) {
++ ip_set_printk("invalid SWAP data (want %zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ index = ip_set_find_byname(req_swap->name);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ to_index = ip_set_find_byname(req_swap->typename);
++ if (to_index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ res = ip_set_swap(index, to_index);
++ goto done;
++ }
++ default:
++ break; /* Set identified by id */
++ }
++
++	/* Here we may have add/del/test/bind/unbind/test_bind operations */
++ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
++ res = -EBADMSG;
++ goto done;
++ }
++ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
++
++ if (len < sizeof(struct ip_set_req_adt)) {
++ ip_set_printk("short data in adt request (want >=%zu, got %u)",
++ sizeof(struct ip_set_req_adt), len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_adt = (struct ip_set_req_adt *) data;
++
++ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
++ if (!(*op == IP_SET_OP_UNBIND_SET
++ && req_adt->index == IP_SET_INVALID_ID)) {
++ index = ip_set_find_byindex(req_adt->index);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ res = adtfn(index, data, len);
++
++ done:
++ up(&ip_set_app_mutex);
++ vfree(data);
++ if (res > 0)
++ res = 0;
++ DP("final result %d", res);
++ return res;
++}
++
++static int
++ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
++{
++ int res = 0;
++ unsigned *op;
++ ip_set_id_t index = IP_SET_INVALID_ID;
++ void *data;
++ int copylen = *len;
++
++ DP("optval=%d, user=%p, len=%d", optval, user, *len);
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (optval != SO_IP_SET)
++ return -EBADF;
++ if (*len < sizeof(unsigned)) {
++ ip_set_printk("short userdata (want >=%zu, got %d)",
++ sizeof(unsigned), *len);
++ return -EINVAL;
++ }
++ data = vmalloc(*len);
++ if (!data) {
++ DP("out of mem for %d bytes", *len);
++ return -ENOMEM;
++ }
++ if (copy_from_user(data, user, *len) != 0) {
++ res = -EFAULT;
++ goto done;
++ }
++ if (down_interruptible(&ip_set_app_mutex)) {
++ res = -EINTR;
++ goto done;
++ }
++
++ op = (unsigned *) data;
++ DP("op=%x", *op);
++
++ if (*op < IP_SET_OP_VERSION) {
++ /* Check the version at the beginning of operations */
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++ res = -EPROTO;
++ goto done;
++ }
++ }
++
++ switch (*op) {
++ case IP_SET_OP_VERSION: {
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++
++ if (*len != sizeof(struct ip_set_req_version)) {
++ ip_set_printk("invalid VERSION (want %zu, got %d)",
++ sizeof(struct ip_set_req_version),
++ *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_version->version = IP_SET_PROTOCOL_VERSION;
++ res = copy_to_user(user, req_version,
++ sizeof(struct ip_set_req_version));
++ goto done;
++ }
++ case IP_SET_OP_GET_BYNAME: {
++ struct ip_set_req_get_set *req_get
++ = (struct ip_set_req_get_set *) data;
++
++ if (*len != sizeof(struct ip_set_req_get_set)) {
++ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
++ sizeof(struct ip_set_req_get_set), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_get->set.name);
++ req_get->set.index = index;
++ goto copy;
++ }
++ case IP_SET_OP_GET_BYINDEX: {
++ struct ip_set_req_get_set *req_get
++ = (struct ip_set_req_get_set *) data;
++
++ if (*len != sizeof(struct ip_set_req_get_set)) {
++ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
++ sizeof(struct ip_set_req_get_set), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byindex(req_get->set.index);
++ strncpy(req_get->set.name,
++ index == IP_SET_INVALID_ID ? ""
++ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
++ goto copy;
++ }
++ case IP_SET_OP_ADT_GET: {
++ struct ip_set_req_adt_get *req_get
++ = (struct ip_set_req_adt_get *) data;
++
++ if (*len != sizeof(struct ip_set_req_adt_get)) {
++ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
++ sizeof(struct ip_set_req_adt_get), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_get->set.name);
++ if (index != IP_SET_INVALID_ID) {
++ req_get->set.index = index;
++ strncpy(req_get->typename,
++ ip_set_list[index]->type->typename,
++ IP_SET_MAXNAMELEN - 1);
++ } else {
++ res = -ENOENT;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_MAX_SETS: {
++ struct ip_set_req_max_sets *req_max_sets
++ = (struct ip_set_req_max_sets *) data;
++ ip_set_id_t i;
++
++ if (*len != sizeof(struct ip_set_req_max_sets)) {
++ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
++ sizeof(struct ip_set_req_max_sets), *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ req_max_sets->set.index = IP_SET_INVALID_ID;
++ } else {
++ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_max_sets->set.index =
++ ip_set_find_byname(req_max_sets->set.name);
++ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ req_max_sets->max_sets = ip_set_max;
++ req_max_sets->sets = 0;
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL)
++ req_max_sets->sets++;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_LIST_SIZE:
++ case IP_SET_OP_SAVE_SIZE: {
++ struct ip_set_req_setnames *req_setnames
++ = (struct ip_set_req_setnames *) data;
++ struct ip_set_name_list *name_list;
++ struct ip_set *set;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_setnames)) {
++ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_setnames), *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_setnames->size = 0;
++ used = sizeof(struct ip_set_req_setnames);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] == NULL)
++ continue;
++ name_list = (struct ip_set_name_list *)
++ (data + used);
++ used += sizeof(struct ip_set_name_list);
++ if (used > copylen) {
++ res = -EAGAIN;
++ goto done;
++ }
++ set = ip_set_list[i];
++ /* Fill in index, name, etc. */
++ name_list->index = i;
++ name_list->id = set->id;
++ strncpy(name_list->name,
++ set->name,
++ IP_SET_MAXNAMELEN - 1);
++ strncpy(name_list->typename,
++ set->type->typename,
++ IP_SET_MAXNAMELEN - 1);
++ DP("filled %s of type %s, index %u\n",
++ name_list->name, name_list->typename,
++ name_list->index);
++ if (!(req_setnames->index == IP_SET_INVALID_ID
++ || req_setnames->index == i))
++ continue;
++ /* Update size */
++ switch (*op) {
++ case IP_SET_OP_LIST_SIZE: {
++ req_setnames->size += sizeof(struct ip_set_list)
++ + set->type->header_size
++ + set->type->list_members_size(set);
++ /* Sets are identified by id in the hash */
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ set->id, &req_setnames->size);
++ break;
++ }
++ case IP_SET_OP_SAVE_SIZE: {
++ req_setnames->size += sizeof(struct ip_set_save)
++ + set->type->header_size
++ + set->type->list_members_size(set);
++ FOREACH_HASH_DO(__set_hash_bindings_size_save,
++ set->id, &req_setnames->size);
++ break;
++ }
++ default:
++ break;
++ }
++ }
++ if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_LIST: {
++ struct ip_set_req_list *req_list
++ = (struct ip_set_req_list *) data;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_list)) {
++ ip_set_printk("short LIST (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_list), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ index = req_list->index;
++ if (index != IP_SET_INVALID_ID
++ && ip_set_find_byindex(index) != index) {
++ res = -ENOENT;
++ goto done;
++ }
++ used = 0;
++ if (index == IP_SET_INVALID_ID) {
++ /* List all sets */
++ for (i = 0; i < ip_set_max && res == 0; i++) {
++ if (ip_set_list[i] != NULL)
++ res = ip_set_list_set(i, data, &used, *len);
++ }
++ } else {
++ /* List an individual set */
++ res = ip_set_list_set(index, data, &used, *len);
++ }
++ if (res != 0)
++ goto done;
++ else if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_SAVE: {
++ struct ip_set_req_list *req_save
++ = (struct ip_set_req_list *) data;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_list)) {
++ ip_set_printk("short SAVE (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_list), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ index = req_save->index;
++ if (index != IP_SET_INVALID_ID
++ && ip_set_find_byindex(index) != index) {
++ res = -ENOENT;
++ goto done;
++ }
++ used = 0;
++ if (index == IP_SET_INVALID_ID) {
++ /* Save all sets */
++ for (i = 0; i < ip_set_max && res == 0; i++) {
++ if (ip_set_list[i] != NULL)
++ res = ip_set_save_set(i, data, &used, *len);
++ }
++ } else {
++ /* Save an individual set */
++ res = ip_set_save_set(index, data, &used, *len);
++ }
++ if (res == 0)
++ res = ip_set_save_bindings(index, data, &used, *len);
++
++ if (res != 0)
++ goto done;
++ else if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_RESTORE: {
++ struct ip_set_req_setnames *req_restore
++ = (struct ip_set_req_setnames *) data;
++ int line;
++
++ if (*len < sizeof(struct ip_set_req_setnames)
++ || *len != req_restore->size) {
++ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
++ req_restore->size, *len);
++ res = -EINVAL;
++ goto done;
++ }
++ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
++ req_restore->size - sizeof(struct ip_set_req_setnames));
++ DP("ip_set_restore: %u", line);
++ if (line != 0) {
++ res = -EAGAIN;
++ req_restore->size = line;
++ copylen = sizeof(struct ip_set_req_setnames);
++ goto copy;
++ }
++ goto done;
++ }
++ default:
++ res = -EBADMSG;
++ goto done;
++ } /* end of switch(op) */
++
++ copy:
++ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ && ip_set_list[index]
++ ? ip_set_list[index]->name
++ : ":all:", copylen);
++ res = copy_to_user(user, data, copylen);
++
++ done:
++ up(&ip_set_app_mutex);
++ vfree(data);
++ if (res > 0)
++ res = 0;
++ DP("final result %d", res);
++ return res;
++}
++
++static struct nf_sockopt_ops so_set = {
++ .pf = PF_INET,
++ .set_optmin = SO_IP_SET,
++ .set_optmax = SO_IP_SET + 1,
++ .set = &ip_set_sockfn_set,
++ .get_optmin = SO_IP_SET,
++ .get_optmax = SO_IP_SET + 1,
++ .get = &ip_set_sockfn_get,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ .owner = THIS_MODULE,
++#endif
++};
++
++static int max_sets, hash_size;
++module_param(max_sets, int, 0600);
++MODULE_PARM_DESC(max_sets, "maximal number of sets");
++module_param(hash_size, int, 0600);
++MODULE_PARM_DESC(hash_size, "hash size for bindings");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("module implementing core IP set support");
++
++static int __init ip_set_init(void)
++{
++ int res;
++ ip_set_id_t i;
++
++ get_random_bytes(&ip_set_hash_random, 4);
++ if (max_sets)
++ ip_set_max = max_sets;
++ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
++ if (!ip_set_list) {
++ printk(KERN_ERR "Unable to create ip_set_list\n");
++ return -ENOMEM;
++ }
++ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
++ if (hash_size)
++ ip_set_bindings_hash_size = hash_size;
++ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
++ if (!ip_set_hash) {
++ printk(KERN_ERR "Unable to create ip_set_hash\n");
++ vfree(ip_set_list);
++ return -ENOMEM;
++ }
++ for (i = 0; i < ip_set_bindings_hash_size; i++)
++ INIT_LIST_HEAD(&ip_set_hash[i]);
++
++ INIT_LIST_HEAD(&set_type_list);
++
++ res = nf_register_sockopt(&so_set);
++ if (res != 0) {
++		ip_set_printk("SO_IP_SET registration failed: %d", res);
++ vfree(ip_set_list);
++ vfree(ip_set_hash);
++ return res;
++ }
++ return 0;
++}
++
++static void __exit ip_set_fini(void)
++{
++ /* There can't be any existing set or binding */
++ nf_unregister_sockopt(&so_set);
++ vfree(ip_set_list);
++ vfree(ip_set_hash);
++ DP("these are the famous last words");
++}
++
++EXPORT_SYMBOL(ip_set_register_set_type);
++EXPORT_SYMBOL(ip_set_unregister_set_type);
++
++EXPORT_SYMBOL(ip_set_get_byname);
++EXPORT_SYMBOL(ip_set_get_byindex);
++EXPORT_SYMBOL(ip_set_put);
++
++EXPORT_SYMBOL(ip_set_addip_kernel);
++EXPORT_SYMBOL(ip_set_delip_kernel);
++EXPORT_SYMBOL(ip_set_testip_kernel);
++
++module_init(ip_set_init);
++module_exit(ip_set_fini);
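
The module above exposes its whole control interface through the single SO_IP_SET
sockopt pair registered in so_set. Below is a minimal userspace sketch of querying the
protocol version through that interface; it assumes the patched headers are installed
and that struct ip_set_req_version carries an unsigned op field followed by the
version, which is the layout ip_set_sockfn_get() expects. It must run with
CAP_NET_ADMIN (typically as root).

/* Query the kernel's ipset protocol version via SO_IP_SET (sketch). */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>

int main(void)
{
	struct ip_set_req_version req;
	socklen_t size = sizeof(req);
	/* Any IPv4 socket works as a handle; the kernel side checks
	 * CAP_NET_ADMIN, exactly as ip_set_sockfn_get() does. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_VERSION;

	/* The kernel copies the request in, fills req.version with its
	 * IP_SET_PROTOCOL_VERSION and copies the struct back out. */
	if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size) < 0) {
		perror("getsockopt SO_IP_SET");
		return 1;
	}
	printf("kernel ipset protocol version: %u\n", req.version);
	return 0;
}

The same pattern, filling in an op-tagged request and calling getsockopt/setsockopt
with SO_IP_SET, is how every other operation handled above is driven from userspace.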
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_iphash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,429 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_iphash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = ip & map->netmask;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ /* No shortcut at testing - there can be deleted
++ * entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == *hash_ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = *hash_ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __addip((struct ip_set_iphash *) set->data,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t hash_ip, *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_iphash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new hash size */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_iphash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iphash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ tmp->netmask = map->netmask;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_iphash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __addip(tmp, *elem, &hash_ip);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t id, *elem;
++
++ if (!ip)
++ return -ERANGE;
++
++ id = hash_id(set, ip, hash_ip);
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iphash_create *req =
++ (struct ip_set_req_iphash_create *) data;
++ struct ip_set_iphash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_iphash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_iphash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iphash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ map->netmask = req->netmask;
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_req_iphash_create *header =
++ (struct ip_set_req_iphash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++ header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_iphash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_iphash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iphash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_iphash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_iphash);
++}
++
++static void __exit ip_set_iphash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_iphash);
++}
++
++module_init(ip_set_iphash_init);
++module_exit(ip_set_iphash_fini);
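
The iphash type above is an open-addressing hash with a fixed, small number of probes:
each of map->probes jhash_1word() variants, seeded with its own random initval, names
one candidate slot, and a fully busy probe sequence on insert returns -EAGAIN so the
set can be grown and rehashed via retry(). The following toy userspace model sketches
that probing scheme under stated assumptions: the mix() function is only a stand-in
for jhash_1word(), and the sizes are deliberately tiny.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PROBES   4
#define HASHSIZE 8

static uint32_t initval[PROBES];

/* Stand-in mixer (NOT jhash_1word): just something seed-dependent. */
static uint32_t mix(uint32_t ip, uint32_t seed)
{
	uint32_t h = ip ^ seed;

	h ^= h >> 16; h *= 0x85ebca6b;
	h ^= h >> 13; h *= 0xc2b2ae35;
	h ^= h >> 16;
	return h;
}

/* Mirror of the __addip() probing: -1 ~ -EEXIST, 0 ~ added,
 * 1 ~ "all probes busy, grow and rehash" (the -EAGAIN case). */
static int add(uint32_t *slots, uint32_t ip)
{
	for (int i = 0; i < PROBES; i++) {
		uint32_t id = mix(ip, initval[i]) % HASHSIZE;

		if (slots[id] == ip)
			return -1;
		if (slots[id] == 0) {	/* 0 is "empty", hence ip == 0 is rejected */
			slots[id] = ip;
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	uint32_t slots[HASHSIZE] = { 0 };

	for (int i = 0; i < PROBES; i++)
		initval[i] = (uint32_t)rand();
	printf("add 10.0.0.1 -> %d\n", add(slots, 0x0a000001));
	printf("add 10.0.0.1 -> %d\n", add(slots, 0x0a000001));
	return 0;
}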
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_ipmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,336 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the single bitmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipmap.h>
++
++static inline ip_set_ip_t
++ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
++{
++ return (ip - map->first_ip)/map->hosts;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
++ return -EEXIST;
++
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
++ return __addip(set, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __addip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
++ return -EEXIST;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_ipmap_create *req =
++ (struct ip_set_req_ipmap_create *) data;
++ struct ip_set_ipmap *map;
++
++ if (size != sizeof(struct ip_set_req_ipmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++ HIPQUAD(req->from), HIPQUAD(req->to));
++
++ if (req->from > req->to) {
++ DP("bad ip range");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipmap));
++ return -ENOMEM;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ map->netmask = req->netmask;
++
++ if (req->netmask == 0xFFFFFFFF) {
++ map->hosts = 1;
++ map->sizeid = map->last_ip - map->first_ip + 1;
++ } else {
++ unsigned int mask_bits, netmask_bits;
++ ip_set_ip_t mask;
++
++ map->first_ip &= map->netmask; /* Should we better bark? */
++
++ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
++ netmask_bits = mask_to_bits(map->netmask);
++
++		if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
++		    || netmask_bits <= mask_bits) {
++			/* Free the map on this error path too */
++			kfree(map);
++			return -ENOEXEC;
++		}
++
++ DP("mask_bits %u, netmask_bits %u",
++ mask_bits, netmask_bits);
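++		/* hosts = 2^(32 - netmask_bits) addresses per aggregated
++		 * block, sizeid = 2^(netmask_bits - mask_bits) blocks:
++		 * one bit in the bitmap per netmask-sized block. */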
++ map->hosts = 2 << (32 - netmask_bits - 1);
++ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
++ }
++ if (map->sizeid > MAX_RANGE + 1) {
++ ip_set_printk("range too big (max %d addresses)",
++ MAX_RANGE+1);
++ kfree(map);
++ return -ENOEXEC;
++ }
++ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
++ newbytes = bitmap_bytes(0, map->sizeid - 1);
++ map->members = kmalloc(newbytes, GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ kfree(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_req_ipmap_create *header =
++ (struct ip_set_req_ipmap_create *) data;
++
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++ header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ return bitmap_bytes(0, map->sizeid - 1);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ int bytes = bitmap_bytes(0, map->sizeid - 1);
++
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_ipmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_ipmap),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_ipmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipmap type of IP sets");
++
++static int __init ip_set_ipmap_init(void)
++{
++ return ip_set_register_set_type(&ip_set_ipmap);
++}
++
++static void __exit ip_set_ipmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_ipmap);
++}
++
++module_init(ip_set_ipmap_init);
++module_exit(ip_set_ipmap_fini);
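
ipmap stores one bit per netmask-sized block of the configured range, so membership is
a single test_bit() on the index computed by ip_to_id(). A small sketch of that
addressing math follows; ipmap_id() is a hypothetical standalone helper, values are in
host byte order, and 1 <= netmask_bits <= 32 is assumed.

#include <stdint.h>
#include <stdio.h>

/* Each netmask-sized block inside the range owns one bit: mask the
 * address, subtract the (already masked) first_ip, divide by the
 * block size. */
static uint32_t ipmap_id(uint32_t first_ip, unsigned netmask_bits, uint32_t ip)
{
	uint32_t netmask = 0xFFFFFFFFu << (32 - netmask_bits);
	uint32_t hosts = 1u << (32 - netmask_bits);	/* addresses per block */

	return ((ip & netmask) - first_ip) / hosts;
}

int main(void)
{
	/* 10.0.0.0 range aggregated to /24 blocks: 10.0.3.7 lands in bit 3 */
	printf("id = %u\n", ipmap_id(0x0A000000u, 24, 0x0A000307u));
	return 0;
}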
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_ipporthash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,581 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++
++static int limit = MAX_RANGE;
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ struct iphdr *iph = ip_hdr(skb);
++#else
++ struct iphdr *iph = skb->nh.iph;
++#endif
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++
++static inline __u32
++jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map =
++ (struct ip_set_ipporthash *) set->data;
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = HASH_IP(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ /* No shortcut at testing - there can be deleted
++ * entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++ int res;
++
++ if (flags[index+1] == 0)
++ return 0;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return 0;
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++ return (res < 0 ? 0 : res);
++
++}
++
++static inline int
++__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == hash_ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = hash_ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = HASH_IP(map, ip, port);
++
++ return __add_haship(map, *hash_ip);
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __addip((struct ip_set_ipporthash *) set->data,
++ req->ip, req->port, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++
++ if (flags[index+1] == 0)
++ return -EINVAL;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __addip((struct ip_set_ipporthash *) set->data,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_ipporthash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new hash size */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipporthash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __add_haship(tmp, *elem);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = hash_id(set, ip, port, hash_ip);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++
++ if (flags[index+1] == 0)
++ return -EINVAL;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_ipporthash_create *req =
++ (struct ip_set_req_ipporthash_create *) data;
++ struct ip_set_ipporthash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_ipporthash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipporthash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_req_ipporthash_create *header =
++ (struct ip_set_req_ipporthash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_ipporthash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_ipporthash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_ipporthash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_ipporthash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_ipporthash);
++}
++
++static void __exit ip_set_ipporthash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_ipporthash);
++}
++
++module_init(ip_set_ipporthash_init);
++module_exit(ip_set_ipporthash_fini);
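The ipporthash lookup and insert paths above share one idea: the ip/port pair is folded into a single key with HASH_IP(), and that key is tried at map->probes alternative slots, each picked by jhash with a different random initval. Testing has to scan every probe because deleted entries leave holes, and when all probes are occupied __add_haship() returns -EAGAIN so that retry() can grow the table and rehash. A minimal userspace sketch of the same scheme follows; the mixer, the table_t type and the constants are invented stand-ins, not the kernel API.

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PROBES 4

typedef struct {
    uint32_t *slots;                 /* 0 means "empty", like the kernel harray */
    uint32_t  size;
    uint32_t  seed[PROBES];          /* like map->initval[] */
} table_t;

/* Stand-in for jhash_1word(): any decent 32-bit mixer works here. */
static uint32_t mix(uint32_t key, uint32_t seed)
{
    key ^= seed;
    key *= 0x9e3779b1u;
    key ^= key >> 16;
    return key;
}

static int table_add(table_t *t, uint32_t key)
{
    for (int i = 0; i < PROBES; i++) {
        uint32_t id = mix(key, t->seed[i]) % t->size;
        if (t->slots[id] == key)
            return -1;               /* -EEXIST in the kernel code */
        if (t->slots[id] == 0) {
            t->slots[id] = key;
            return 0;
        }
    }
    return 1;                        /* all probes taken: caller must rehash */
}

static int table_test(const table_t *t, uint32_t key)
{
    for (int i = 0; i < PROBES; i++) {
        uint32_t id = mix(key, t->seed[i]) % t->size;
        if (t->slots[id] == key)
            return 1;
        /* no early exit on an empty slot: deleted entries leave holes */
    }
    return 0;
}

int main(void)
{
    table_t t = { calloc(1024, sizeof(uint32_t)), 1024,
                  { 0x12345678, 0x9abcdef0, 0x0fedcba9, 0x87654321 } };
    uint32_t ip = (192u << 24) | (168u << 16) | 1u;       /* 192.168.0.1 */
    uint32_t first_ip = (192u << 24) | (168u << 16);
    uint32_t key = 80u | ((ip - first_ip) << 16);         /* HASH_IP(map, ip, port) */

    if (table_add(&t, key) == 1)
        fprintf(stderr, "would grow the table and rehash, as retry() does\n");
    printf("member: %d\n", table_test(&t, key));          /* prints 1 */
    free(t.slots);
    return 0;
}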
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_iptree.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,612 @@
++/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the iptree type */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++
++#include <linux/netfilter_ipv4/ip_set_iptree.h>
++
++static int limit = MAX_RANGE;
++
++/* Garbage collection interval in seconds: */
++#define IPTREE_GC_TIME 5*60
++/* Sleep so many milliseconds before trying again
++ * to delete the gc timer at destroying/flushing a set */
++#define IPTREE_DESTROY_SLEEP 100
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *branch_cachep;
++static struct kmem_cache *leaf_cachep;
++#else
++static kmem_cache_t *branch_cachep;
++static kmem_cache_t *leaf_cachep;
++#endif
++
++#if defined(__LITTLE_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[3]; \
++ b = ((unsigned char *)addrp)[2]; \
++ c = ((unsigned char *)addrp)[1]; \
++ d = ((unsigned char *)addrp)[0]; \
++} while (0)
++#elif defined(__BIG_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[0]; \
++ b = ((unsigned char *)addrp)[1]; \
++ c = ((unsigned char *)addrp)[2]; \
++ d = ((unsigned char *)addrp)[3]; \
++} while (0)
++#else
++#error "Please fix asm/byteorder.h"
++#endif /* __LITTLE_ENDIAN */
++
++#define TESTIP_WALK(map, elem, branch) do { \
++ if ((map)->tree[elem]) { \
++ branch = (map)->tree[elem]; \
++ } else \
++ return 0; \
++} while (0)
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++
++ if (!ip)
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
++ TESTIP_WALK(map, a, btree);
++ TESTIP_WALK(btree, b, ctree);
++ TESTIP_WALK(ctree, c, dtree);
++ DP("%lu %lu", dtree->expires[d], jiffies);
++ return dtree->expires[d]
++ && (!map->timeout
++ || time_after(dtree->expires[d], jiffies));
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res;
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++ return (res < 0 ? 0 : res);
++}
++
++#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
++ if ((map)->tree[elem]) { \
++ DP("found %u", elem); \
++ branch = (map)->tree[elem]; \
++ } else { \
++ branch = (type *) \
++ kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (branch == NULL) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[elem] = branch; \
++ DP("alloc %u", elem); \
++ } \
++} while (0)
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++ int ret = 0;
++
++ if (!ip || map->elements >= limit)
++ /* We could call the garbage collector
++ * but it's probably overkill */
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
++ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
++ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
++ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++ ret = -EEXIST;
++ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
++ /* Lottery: I won! */
++ if (dtree->expires[d] == 0)
++ dtree->expires[d] = 1;
++ DP("%u %lu", d, dtree->expires[d]);
++ if (ret == 0)
++ map->elements++;
++ return ret;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
++ return __addip(set, req->ip,
++ req->timeout ? req->timeout : map->timeout,
++ hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ return __addip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ map->timeout,
++ hash_ip);
++}
++
++#define DELIP_WALK(map, elem, branch) do { \
++ if ((map)->tree[elem]) { \
++ branch = (map)->tree[elem]; \
++ } else \
++ return -EEXIST; \
++} while (0)
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++
++ if (!ip)
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DELIP_WALK(map, a, btree);
++ DELIP_WALK(btree, b, ctree);
++ DELIP_WALK(ctree, c, dtree);
++
++ if (dtree->expires[d]) {
++ dtree->expires[d] = 0;
++ map->elements--;
++ return 0;
++ }
++ return -EEXIST;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++ for (i = 0; i < 256; i++) { \
++ if (!(map)->tree[i]) \
++ continue; \
++ branch = (map)->tree[i]
++
++#define LOOP_WALK_END }
++
++static void ip_tree_gc(unsigned long ul_set)
++{
++ struct ip_set *set = (void *) ul_set;
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ unsigned char i,j,k;
++
++ i = j = k = 0;
++ DP("gc: %s", set->name);
++ write_lock_bh(&set->lock);
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]) {
++ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
++ a, b, c, d,
++ dtree->expires[d], jiffies);
++ if (map->timeout
++ && time_before(dtree->expires[d], jiffies)) {
++ dtree->expires[d] = 0;
++ map->elements--;
++ } else
++ k = 1;
++ }
++ }
++ if (k == 0) {
++ DP("gc: %s: leaf %u %u %u empty",
++ set->name, a, b, c);
++ kmem_cache_free(leaf_cachep, dtree);
++ ctree->tree[c] = NULL;
++ } else {
++ DP("gc: %s: leaf %u %u %u not empty",
++ set->name, a, b, c);
++ j = 1;
++ k = 0;
++ }
++ LOOP_WALK_END;
++ if (j == 0) {
++ DP("gc: %s: branch %u %u empty",
++ set->name, a, b);
++ kmem_cache_free(branch_cachep, ctree);
++ btree->tree[b] = NULL;
++ } else {
++ DP("gc: %s: branch %u %u not empty",
++ set->name, a, b);
++ i = 1;
++ j = k = 0;
++ }
++ LOOP_WALK_END;
++ if (i == 0) {
++ DP("gc: %s: branch %u empty",
++ set->name, a);
++ kmem_cache_free(branch_cachep, btree);
++ map->tree[a] = NULL;
++ } else {
++ DP("gc: %s: branch %u not empty",
++ set->name, a);
++ i = j = k = 0;
++ }
++ LOOP_WALK_END;
++ write_unlock_bh(&set->lock);
++
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static inline void init_gc_timer(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ /* Even if there is no timeout for the entries,
++ * we still have to call gc because deletion
++ * does not clean up empty branches */
++ map->gc_interval = IPTREE_GC_TIME;
++ init_timer(&map->gc);
++ map->gc.data = (unsigned long) set;
++ map->gc.function = ip_tree_gc;
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iptree_create *req =
++ (struct ip_set_req_iptree_create *) data;
++ struct ip_set_iptree *map;
++
++ if (size != sizeof(struct ip_set_req_iptree_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree_create),
++ size);
++ return -EINVAL;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iptree));
++ return -ENOMEM;
++ }
++ memset(map, 0, sizeof(*map));
++ map->timeout = req->timeout;
++ map->elements = 0;
++ set->data = map;
++
++ init_gc_timer(set);
++
++ return 0;
++}
++
++static void __flush(struct ip_set_iptree *map)
++{
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ kmem_cache_free(leaf_cachep, dtree);
++ LOOP_WALK_END;
++ kmem_cache_free(branch_cachep, ctree);
++ LOOP_WALK_END;
++ kmem_cache_free(branch_cachep, btree);
++ LOOP_WALK_END;
++ map->elements = 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ /* gc might be running */
++ while (!del_timer(&map->gc))
++ msleep(IPTREE_DESTROY_SLEEP);
++ __flush(map);
++ kfree(map);
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ unsigned int timeout = map->timeout;
++
++ /* gc might be running */
++ while (!del_timer(&map->gc))
++ msleep(IPTREE_DESTROY_SLEEP);
++ __flush(map);
++ memset(map, 0, sizeof(*map));
++ map->timeout = timeout;
++
++ init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_req_iptree_create *header =
++ (struct ip_set_req_iptree_create *) data;
++
++ header->timeout = map->timeout;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ unsigned int count = 0;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++ count++;
++ }
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++
++ DP("members %u", count);
++ return (count * sizeof(struct ip_set_req_iptree));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ size_t offset = 0;
++ struct ip_set_req_iptree *entry;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
++ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
++ entry->timeout = !map->timeout ? 0
++ : (dtree->expires[d] - jiffies)/HZ;
++ offset += sizeof(struct ip_set_req_iptree);
++ }
++ }
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++}
++
++static struct ip_set_type ip_set_iptree = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_iptree),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iptree_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptree type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_iptree_init(void)
++{
++ int ret;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ branch_cachep = kmem_cache_create("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb),
++ 0, 0, NULL);
++#else
++ branch_cachep = kmem_cache_create("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb),
++ 0, 0, NULL, NULL);
++#endif
++ if (!branch_cachep) {
++ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ leaf_cachep = kmem_cache_create("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed),
++ 0, 0, NULL);
++#else
++ leaf_cachep = kmem_cache_create("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed),
++ 0, 0, NULL, NULL);
++#endif
++ if (!leaf_cachep) {
++ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
++ ret = -ENOMEM;
++ goto free_branch;
++ }
++ ret = ip_set_register_set_type(&ip_set_iptree);
++ if (ret == 0)
++ goto out;
++
++ kmem_cache_destroy(leaf_cachep);
++ free_branch:
++ kmem_cache_destroy(branch_cachep);
++ out:
++ return ret;
++}
++
++static void __exit ip_set_iptree_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_iptree);
++ kmem_cache_destroy(leaf_cachep);
++ kmem_cache_destroy(branch_cachep);
++}
++
++module_init(ip_set_iptree_init);
++module_exit(ip_set_iptree_fini);
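The iptree type above stores a host-order address by splitting it into octets a.b.c.d with ABCD() and descending a three-level, 256-way pointer tree (iptreeb -> iptreec -> iptreed); the leaf holds one expiry timestamp per final octet, and the periodic gc walk prunes leaves and branches that have become empty. A compact userspace sketch of that layout follows, with invented type names and time() standing in for jiffies; garbage collection and locking are omitted, and allocations are deliberately not freed in this toy example.

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct leaf   { time_t expires[256]; };          /* like ip_set_iptreed */
struct level2 { struct leaf   *tree[256]; };     /* like ip_set_iptreec */
struct level1 { struct level2 *tree[256]; };     /* like ip_set_iptreeb */
struct root   { struct level1 *tree[256]; };     /* like ip_set_iptree  */

static int add_ip(struct root *map, uint32_t ip, unsigned timeout)
{
    unsigned a = ip >> 24, b = (ip >> 16) & 0xff,
             c = (ip >> 8) & 0xff, d = ip & 0xff;

    if (!map->tree[a] && !(map->tree[a] = calloc(1, sizeof(struct level1))))
        return -1;
    if (!map->tree[a]->tree[b] &&
        !(map->tree[a]->tree[b] = calloc(1, sizeof(struct level2))))
        return -1;
    if (!map->tree[a]->tree[b]->tree[c] &&
        !(map->tree[a]->tree[b]->tree[c] = calloc(1, sizeof(struct leaf))))
        return -1;

    /* a timeout of 0 would mean "never expires" (stored as 1 in the kernel) */
    map->tree[a]->tree[b]->tree[c]->expires[d] = time(NULL) + timeout;
    return 0;
}

static int test_ip(const struct root *map, uint32_t ip)
{
    unsigned a = ip >> 24, b = (ip >> 16) & 0xff,
             c = (ip >> 8) & 0xff, d = ip & 0xff;
    const struct leaf *l;

    if (!map->tree[a] || !map->tree[a]->tree[b] ||
        !(l = map->tree[a]->tree[b]->tree[c]))
        return 0;
    return l->expires[d] != 0 && l->expires[d] > time(NULL);
}

int main(void)
{
    struct root map = { { 0 } };
    uint32_t ip = (10u << 24) | 1u;                  /* 10.0.0.1 */

    add_ip(&map, ip, 600);
    printf("member: %d\n", test_ip(&map, ip));       /* prints 1 */
    return 0;
}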
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_iptreemap.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_iptreemap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,829 @@
++/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++/* This module implements the iptreemap ipset type. It uses bitmaps to
++ * represent every single IPv4 address as a single bit. The bitmaps are managed
++ * in a tree structure, where the first three octets of an address are used
++ * as an index to find the bitmap and the last octet is used as the bit number.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
++
++#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
++#define IPTREEMAP_DESTROY_SLEEP (100)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *cachep_b;
++static struct kmem_cache *cachep_c;
++static struct kmem_cache *cachep_d;
++#else
++static kmem_cache_t *cachep_b;
++static kmem_cache_t *cachep_c;
++static kmem_cache_t *cachep_d;
++#endif
++
++static struct ip_set_iptreemap_d *fullbitmap_d;
++static struct ip_set_iptreemap_c *fullbitmap_c;
++static struct ip_set_iptreemap_b *fullbitmap_b;
++
++#if defined(__LITTLE_ENDIAN)
++#define ABCD(a, b, c, d, addr) \
++ do { \
++ a = ((unsigned char *)addr)[3]; \
++ b = ((unsigned char *)addr)[2]; \
++ c = ((unsigned char *)addr)[1]; \
++ d = ((unsigned char *)addr)[0]; \
++ } while (0)
++#elif defined(__BIG_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[0]; \
++ b = ((unsigned char *)addrp)[1]; \
++ c = ((unsigned char *)addrp)[2]; \
++ d = ((unsigned char *)addrp)[3]; \
++} while (0)
++#else
++#error "Please fix asm/byteorder.h"
++#endif /* __LITTLE_ENDIAN */
++
++#define TESTIP_WALK(map, elem, branch, full) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) \
++ return 0; \
++ else if (branch == full) \
++ return 1; \
++ } while (0)
++
++#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) { \
++ branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (!branch) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[elem] = branch; \
++ } else if (branch == full) { \
++ return -EEXIST; \
++ } \
++ } while (0)
++
++#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
++ for (a = a1; a <= a2; a++) { \
++ branch = (map)->tree[a]; \
++ if (branch != full) { \
++ if ((a > a1 && a < a2) || (hint)) { \
++ if (branch) \
++ free(branch); \
++ (map)->tree[a] = full; \
++ continue; \
++ } else if (!branch) { \
++ branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (!branch) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[a] = branch; \
++ }
++
++#define ADDIP_RANGE_LOOP_END() \
++ } \
++ }
++
++#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) { \
++ return -EEXIST; \
++ } else if (branch == full) { \
++ branch = kmem_cache_alloc(cachep, flags); \
++ if (!branch) \
++ return -ENOMEM; \
++ memcpy(branch, full, sizeof(*full)); \
++ (map)->tree[elem] = branch; \
++ } \
++ } while (0)
++
++#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
++ for (a = a1; a <= a2; a++) { \
++ branch = (map)->tree[a]; \
++ if (branch) { \
++ if ((a > a1 && a < a2) || (hint)) { \
++ if (branch != full) \
++ free(branch); \
++ (map)->tree[a] = NULL; \
++ continue; \
++ } else if (branch == full) { \
++ branch = kmem_cache_alloc(cachep, flags); \
++ if (!branch) \
++ return -ENOMEM; \
++ memcpy(branch, full, sizeof(*branch)); \
++ (map)->tree[a] = branch; \
++ }
++
++#define DELIP_RANGE_LOOP_END() \
++ } \
++ }
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++ for (i = 0; i < 256; i++) { \
++ branch = (map)->tree[i]; \
++ if (likely(!branch)) \
++ continue;
++
++#define LOOP_WALK_END() \
++ }
++
++#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
++ count = -256; \
++ for (i = 0; i < 256; i++) { \
++ branch = (map)->tree[i]; \
++ if (likely(!branch)) \
++ continue; \
++ count++; \
++ if (branch == full) { \
++ count++; \
++ continue; \
++ }
++
++#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
++ if (-256 == count) { \
++ kmem_cache_free(cachep, branch); \
++ (map)->tree[i] = NULL; \
++ } else if (256 == count) { \
++ kmem_cache_free(cachep, branch); \
++ (map)->tree[i] = full; \
++ } \
++ }
++
++#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
++ for (i = 0; i < 256; i++) { \
++ if (!(map)->tree[i]) { \
++ if (inrange) { \
++ count++; \
++ inrange = 0; \
++ } \
++ continue; \
++ } \
++ branch = (map)->tree[i];
++
++#define LOOP_WALK_END_COUNT() \
++ }
++
++#define MIN(a, b) (a < b ? a : b)
++#define MAX(a, b) (a > b ? a : b)
++
++#define GETVALUE1(a, a1, b1, r) \
++ (a == a1 ? b1 : r)
++
++#define GETVALUE2(a, b, a1, b1, c1, r) \
++ (a == a1 && b == b1 ? c1 : r)
++
++#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
++ (a == a1 && b == b1 && c == c1 ? d1 : r)
++
++#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE1(a, a1, b1, 0) == 0 \
++ && GETVALUE1(a, a2, b2, 255) == 255 \
++ && c1 == 0 \
++ && c2 == 255 \
++ && d1 == 0 \
++ && d2 == 255 \
++ )
++
++#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
++ && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
++ && d1 == 0 \
++ && d2 == 255 \
++ )
++
++#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
++ && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
++ )
++
++
++static inline void
++free_d(struct ip_set_iptreemap_d *map)
++{
++ kmem_cache_free(cachep_d, map);
++}
++
++static inline void
++free_c(struct ip_set_iptreemap_c *map)
++{
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int i;
++
++ LOOP_WALK_BEGIN(map, i, dtree) {
++ if (dtree != fullbitmap_d)
++ free_d(dtree);
++ } LOOP_WALK_END();
++
++ kmem_cache_free(cachep_c, map);
++}
++
++static inline void
++free_b(struct ip_set_iptreemap_b *map)
++{
++ struct ip_set_iptreemap_c *ctree;
++ unsigned int i;
++
++ LOOP_WALK_BEGIN(map, i, ctree) {
++ if (ctree != fullbitmap_c)
++ free_c(ctree);
++ } LOOP_WALK_END();
++
++ kmem_cache_free(cachep_b, map);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a, b, c, d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ TESTIP_WALK(map, a, btree, fullbitmap_b);
++ TESTIP_WALK(btree, b, ctree, fullbitmap_c);
++ TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
++
++ return !!test_bit(d, (void *) dtree->bitmap);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __testip(set, req->start, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++ int res;
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a, b, c, d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
++ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
++ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
++
++ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ return -EEXIST;
++
++ set_bit(b, (void *) btree->dirty);
++
++ return 0;
++}
++
++static inline int
++__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d;
++ unsigned char a1, b1, c1, d1;
++ unsigned char a2, b2, c2, d2;
++
++ if (start == end)
++ return __addip_single(set, start, hash_ip);
++
++ *hash_ip = start;
++
++ ABCD(a1, b1, c1, d1, &start);
++ ABCD(a2, b2, c2, d2, &end);
++
++ /* This is sooo ugly... */
++ ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
++ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
++ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
++ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
++ set_bit(d, (void *) dtree->bitmap);
++ set_bit(b, (void *) btree->dirty);
++ } ADDIP_RANGE_LOOP_END();
++ } ADDIP_RANGE_LOOP_END();
++ } ADDIP_RANGE_LOOP_END();
++
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++
++ return __addip_single(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a,b,c,d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
++ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
++ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
++
++ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ return -EEXIST;
++
++ set_bit(b, (void *) btree->dirty);
++
++ return 0;
++}
++
++static inline int
++__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d;
++ unsigned char a1, b1, c1, d1;
++ unsigned char a2, b2, c2, d2;
++
++ if (start == end)
++ return __delip_single(set, start, hash_ip, flags);
++
++ *hash_ip = start;
++
++ ABCD(a1, b1, c1, d1, &start);
++ ABCD(a2, b2, c2, d2, &end);
++
++ /* This is sooo ugly... */
++ DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
++ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
++ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
++ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
++ clear_bit(d, (void *) dtree->bitmap);
++ set_bit(b, (void *) btree->dirty);
++ } DELIP_RANGE_LOOP_END();
++ } DELIP_RANGE_LOOP_END();
++ } DELIP_RANGE_LOOP_END();
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
++}
++
++static int
++delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++ return __delip_single(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip,
++ GFP_ATOMIC);
++}
++
++/* Check the status of the bitmap
++ * -1 == all bits cleared
++ * 1 == all bits set
++ * 0 == anything else
++ */
++static inline int
++bitmap_status(struct ip_set_iptreemap_d *dtree)
++{
++ unsigned char first = dtree->bitmap[0];
++ int a;
++
++ for (a = 1; a < 32; a++)
++ if (dtree->bitmap[a] != first)
++ return 0;
++
++ return (first == 0 ? -1 : (first == 255 ? 1 : 0));
++}
++
++static void
++gc(unsigned long addr)
++{
++ struct ip_set *set = (struct ip_set *) addr;
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c;
++ int i, j, k;
++
++ write_lock_bh(&set->lock);
++
++ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
++ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
++ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ continue;
++ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
++ switch (bitmap_status(dtree)) {
++ case -1:
++ kmem_cache_free(cachep_d, dtree);
++ ctree->tree[c] = NULL;
++ k--;
++ break;
++ case 1:
++ kmem_cache_free(cachep_d, dtree);
++ ctree->tree[c] = fullbitmap_d;
++ k++;
++ break;
++ }
++ } LOOP_WALK_END();
++ } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
++ } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
++
++ write_unlock_bh(&set->lock);
++
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static inline void
++init_gc_timer(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ init_timer(&map->gc);
++ map->gc.data = (unsigned long) set;
++ map->gc.function = gc;
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map;
++
++ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
++ return -EINVAL;
++ }
++
++ map = kzalloc(sizeof(*map), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
++ set->data = map;
++
++ init_gc_timer(set);
++
++ return 0;
++}
++
++static inline void __flush(struct ip_set_iptreemap *map)
++{
++ struct ip_set_iptreemap_b *btree;
++ unsigned int a;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ if (btree != fullbitmap_b)
++ free_b(btree);
++ LOOP_WALK_END();
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ while (!del_timer(&map->gc))
++ msleep(IPTREEMAP_DESTROY_SLEEP);
++
++ __flush(map);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ while (!del_timer(&map->gc))
++ msleep(IPTREEMAP_DESTROY_SLEEP);
++
++ __flush(map);
++
++ memset(map, 0, sizeof(*map));
++
++ init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++
++ header->gc_interval = map->gc_interval;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d, inrange = 0, count = 0;
++
++ LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
++ LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
++ LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
++ for (d = 0; d < 256; d++) {
++ if (test_bit(d, (void *) dtree->bitmap)) {
++ inrange = 1;
++ } else if (inrange) {
++ count++;
++ inrange = 0;
++ }
++ }
++ } LOOP_WALK_END_COUNT();
++ } LOOP_WALK_END_COUNT();
++ } LOOP_WALK_END_COUNT();
++
++ if (inrange)
++ count++;
++
++ return (count * sizeof(struct ip_set_req_iptreemap));
++}
++
++static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++{
++ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++
++ entry->start = start;
++ entry->end = end;
++
++ return sizeof(*entry);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d, inrange = 0;
++ size_t offset = 0;
++ ip_set_ip_t start = 0, end = 0, ip;
++
++ LOOP_WALK_BEGIN(map, a, btree) {
++ LOOP_WALK_BEGIN(btree, b, ctree) {
++ LOOP_WALK_BEGIN(ctree, c, dtree) {
++ for (d = 0; d < 256; d++) {
++ if (test_bit(d, (void *) dtree->bitmap)) {
++ ip = ((a << 24) | (b << 16) | (c << 8) | d);
++ if (!inrange) {
++ inrange = 1;
++ start = ip;
++ } else if (end < ip - 1) {
++ offset += add_member(data, offset, start, end);
++ start = ip;
++ }
++ end = ip;
++ } else if (inrange) {
++ offset += add_member(data, offset, start, end);
++ inrange = 0;
++ }
++ }
++ } LOOP_WALK_END();
++ } LOOP_WALK_END();
++ } LOOP_WALK_END();
++
++ if (inrange)
++ add_member(data, offset, start, end);
++}
++
++static struct ip_set_type ip_set_iptreemap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = create,
++ .destroy = destroy,
++ .flush = flush,
++ .reqsize = sizeof(struct ip_set_req_iptreemap),
++ .addip = addip,
++ .addip_kernel = addip_kernel,
++ .delip = delip,
++ .delip_kernel = delip_kernel,
++ .testip = testip,
++ .testip_kernel = testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iptreemap_create),
++ .list_header = list_header,
++ .list_members_size = list_members_size,
++ .list_members = list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
++MODULE_DESCRIPTION("iptreemap type of IP sets");
++
++static int __init ip_set_iptreemap_init(void)
++{
++ int ret = -ENOMEM;
++ int a;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
++ 0, 0, NULL);
++#else
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_b) {
++ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
++ goto out;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c),
++ 0, 0, NULL);
++#else
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_c) {
++ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
++ goto outb;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d),
++ 0, 0, NULL);
++#else
++ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_d) {
++ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
++ goto outc;
++ }
++
++ fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
++ if (!fullbitmap_d)
++ goto outd;
++
++ fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
++ if (!fullbitmap_c)
++ goto outbitmapd;
++
++ fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
++ if (!fullbitmap_b)
++ goto outbitmapc;
++
++ ret = ip_set_register_set_type(&ip_set_iptreemap);
++ if (0 > ret)
++ goto outbitmapb;
++
++ /* Now init our global bitmaps */
++ memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
++
++ for (a = 0; a < 256; a++)
++ fullbitmap_c->tree[a] = fullbitmap_d;
++
++ for (a = 0; a < 256; a++)
++ fullbitmap_b->tree[a] = fullbitmap_c;
++ memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
++
++ return 0;
++
++outbitmapb:
++ kmem_cache_free(cachep_b, fullbitmap_b);
++outbitmapc:
++ kmem_cache_free(cachep_c, fullbitmap_c);
++outbitmapd:
++ kmem_cache_free(cachep_d, fullbitmap_d);
++outd:
++ kmem_cache_destroy(cachep_d);
++outc:
++ kmem_cache_destroy(cachep_c);
++outb:
++ kmem_cache_destroy(cachep_b);
++out:
++
++ return ret;
++}
++
++static void __exit ip_set_iptreemap_fini(void)
++{
++ ip_set_unregister_set_type(&ip_set_iptreemap);
++ kmem_cache_free(cachep_d, fullbitmap_d);
++ kmem_cache_free(cachep_c, fullbitmap_c);
++ kmem_cache_free(cachep_b, fullbitmap_b);
++ kmem_cache_destroy(cachep_d);
++ kmem_cache_destroy(cachep_c);
++ kmem_cache_destroy(cachep_b);
++}
++
++module_init(ip_set_iptreemap_init);
++module_exit(ip_set_iptreemap_fini);
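When the iptreemap above is listed, list_members() does not dump raw bitmaps: it walks the tree in address order and merges consecutive member addresses into closed start/end ranges, flushing a range whenever a gap appears and once more after the loops for the final open run (add_member() writes each struct ip_set_req_iptreemap). A small userspace sketch of that run-merging step follows, with an invented emit_ranges() helper operating on a sorted address array instead of the tree walk.

/* Illustrative sketch only, not part of the patch. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*range_fn)(uint32_t start, uint32_t end);

/* ips[] must be sorted ascending, n > 0 */
static void emit_ranges(const uint32_t *ips, size_t n, range_fn emit)
{
    uint32_t start = ips[0], end = ips[0];

    for (size_t i = 1; i < n; i++) {
        if (ips[i] == end + 1) {        /* still contiguous: extend the run */
            end = ips[i];
        } else {                        /* gap: flush and start a new run */
            emit(start, end);
            start = end = ips[i];
        }
    }
    emit(start, end);                   /* final open run, as after the loops */
}

static void print_range(uint32_t s, uint32_t e)
{
    printf("%u.%u.%u.%u-%u.%u.%u.%u\n",
           s >> 24, (s >> 16) & 0xff, (s >> 8) & 0xff, s & 0xff,
           e >> 24, (e >> 16) & 0xff, (e >> 8) & 0xff, e & 0xff);
}

int main(void)
{
    /* 10.0.0.1-10.0.0.3 plus the single address 10.0.0.9 */
    uint32_t ips[] = { 0x0a000001, 0x0a000002, 0x0a000003, 0x0a000009 };

    emit_ranges(ips, sizeof(ips) / sizeof(ips[0]), print_range);
    return 0;
}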
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_macipmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,375 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the macipmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/if_ether.h>
++#include <linux/vmalloc.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_macipmap.h>
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
++ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->ip < map->first_ip || req->ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = req->ip;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (test_bit(IPSET_MACIP_ISSET,
++ (void *) &table[req->ip - map->first_ip].flags)) {
++ return (memcmp(req->ethernet,
++ &table[req->ip - map->first_ip].ethernet,
++ ETH_ALEN) == 0);
++ } else {
++ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++ }
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++ ip_set_ip_t ip;
++
++ ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return 0;
++
++ *hash_ip = ip;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (test_bit(IPSET_MACIP_ISSET,
++ (void *) &table[ip - map->first_ip].flags)) {
++ /* Is mac pointer valid?
++ * If so, compare... */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ return (skb_mac_header(skb) >= skb->head
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
++#else
++ return (skb->mac.raw >= skb->head
++ && (skb->mac.raw + ETH_HLEN) <= skb->data
++#endif
++ && (memcmp(eth_hdr(skb)->h_source,
++ &table[ip - map->first_ip].ethernet,
++ ETH_ALEN) == 0));
++ } else {
++ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++ }
++}
++
++/* returns 0 on success */
++static inline int
++__addip(struct ip_set *set,
++ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(IPSET_MACIP_ISSET,
++ (void *) &table[ip - map->first_ip].flags))
++ return -EEXIST;
++
++ *hash_ip = ip;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_macipmap *req =
++ (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++ return __addip(set, req->ip, req->ethernet, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t ip;
++
++ ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (!(skb_mac_header(skb) >= skb->head
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
++#else
++ if (!(skb->mac.raw >= skb->head
++ && (skb->mac.raw + ETH_HLEN) <= skb->data))
++#endif
++ return -EINVAL;
++
++ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
++ (void *)&table[ip - map->first_ip].flags))
++ return -EEXIST;
++
++ *hash_ip = ip;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_macipmap *req =
++ (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
++{
++ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_macipmap_create *req =
++ (struct ip_set_req_macipmap_create *) data;
++ struct ip_set_macipmap *map;
++
++ if (size != sizeof(struct ip_set_req_macipmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++ HIPQUAD(req->from), HIPQUAD(req->to));
++
++ if (req->from > req->to) {
++ DP("bad ip range");
++ return -ENOEXEC;
++ }
++
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big (max %d addresses)",
++ MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_macipmap));
++ return -ENOMEM;
++ }
++ map->flags = req->flags;
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ newbytes = members_size(map->first_ip, map->last_ip);
++ map->members = ip_set_malloc(newbytes);
++ DP("members: %u %p", newbytes, map->members);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_req_macipmap_create *header =
++ (struct ip_set_req_macipmap_create *) data;
++
++ DP("list_header %x %x %u", map->first_ip, map->last_ip,
++ map->flags);
++
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++ header->flags = map->flags;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ DP("%u", members_size(map->first_ip, map->last_ip));
++ return members_size(map->first_ip, map->last_ip);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ int bytes = members_size(map->first_ip, map->last_ip);
++
++ DP("members: %u %p", bytes, map->members);
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_macipmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_macipmap),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_macipmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("macipmap type of IP sets");
++
++static int __init ip_set_macipmap_init(void)
++{
++ init_max_malloc_size();
++ return ip_set_register_set_type(&ip_set_macipmap);
++}
++
++static void __exit ip_set_macipmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_macipmap);
++}
++
++module_init(ip_set_macipmap_init);
++module_exit(ip_set_macipmap_fini);
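The macipmap module above keeps one struct ip_set_macip per address in the configured range, so a membership test is a direct array index (ip - first_ip) plus a per-entry MAC comparison. The following standalone sketch illustrates that lookup idea only; the types and helper names are hypothetical simplifications, not the kernel structures from this patch.

/* Sketch of the macipmap lookup: index by (ip - first_ip), check an
 * "in set" flag, then compare the stored MAC address.
 * Standalone illustration with hypothetical names. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

struct macip_entry {
	uint8_t in_set;            /* corresponds to the ISSET flag bit */
	uint8_t mac[ETH_ALEN];     /* stored ethernet address */
};

static int macip_test(const struct macip_entry *table,
		      uint32_t first_ip, uint32_t last_ip,
		      uint32_t ip, const uint8_t *mac)
{
	if (ip < first_ip || ip > last_ip)
		return 0;                       /* outside the range */
	if (!table[ip - first_ip].in_set)
		return 0;                       /* address not added */
	return memcmp(table[ip - first_ip].mac, mac, ETH_ALEN) == 0;
}

int main(void)
{
	struct macip_entry table[256] = { 0 };
	uint32_t first = 0xc0a80100, last = 0xc0a801ff; /* 192.168.1.0/24, host order */
	uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	table[5].in_set = 1;                            /* add 192.168.1.5 */
	memcpy(table[5].mac, mac, ETH_ALEN);

	printf("%d\n", macip_test(table, first, last, first + 5, mac)); /* 1 */
	printf("%d\n", macip_test(table, first, last, first + 6, mac)); /* 0 */
	return 0;
}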
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_nethash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,497 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing a cidr nethash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_nethash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id_cidr(struct ip_set_nethash *map,
++ ip_set_ip_t ip,
++ unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = pack(ip, cidr);
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ }
++ return UINT_MAX;
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ __u32 id = UINT_MAX;
++ int i;
++
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ if (id != UINT_MAX)
++ break;
++ }
++ return id;
++}
++
++static inline int
++__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
++ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
++
++ *hash_ip = pack(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++
++ return __addip_base(map, *hash_ip);
++}
++
++static void
++update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
++{
++ unsigned char next;
++ int i;
++
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ if (map->cidr[i] == cidr) {
++ return;
++ } else if (map->cidr[i] < cidr) {
++ next = map->cidr[i];
++ map->cidr[i] = cidr;
++ cidr = next;
++ }
++ }
++ if (i < 30)
++ map->cidr[i] = cidr;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++ int ret;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ ret = __addip((struct ip_set_nethash *) set->data,
++ req->ip, req->cidr, hash_ip);
++
++ if (ret == 0)
++ update_cidr_sizes((struct ip_set_nethash *) set->data,
++ req->cidr);
++
++ return ret;
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ int ret = -ERANGE;
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (map->cidr[0])
++ ret = __addip(map, ip, map->cidr[0], hash_ip);
++
++ return ret;
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ ip_set_ip_t *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_nethash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new parameters */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_nethash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_nethash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_nethash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __addip_base(tmp, *elem);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ ip_set_ip_t id, *elem;
++
++ if (!ip)
++ return -ERANGE;
++
++ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ /* TODO: no garbage collection in map->cidr */
++ return __delip((struct ip_set_nethash *) set->data,
++ req->ip, req->cidr, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ int ret = -ERANGE;
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (map->cidr[0])
++ ret = __delip(map, ip, map->cidr[0], hash_ip);
++
++ return ret;
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_nethash_create *req =
++ (struct ip_set_req_nethash_create *) data;
++ struct ip_set_nethash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_nethash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_nethash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_nethash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ memset(map->cidr, 0, 30 * sizeof(unsigned char));
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ memset(map->cidr, 0, 30 * sizeof(unsigned char));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ struct ip_set_req_nethash_create *header =
++ (struct ip_set_req_nethash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_nethash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_nethash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_nethash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("nethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_nethash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_nethash);
++}
++
++static void __exit ip_set_nethash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_nethash);
++}
++
++module_init(ip_set_nethash_init);
++module_exit(ip_set_nethash_fini);
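The nethash module above stores packed ip/cidr values in an open hash table probed with a small, per-set list of seeded hash functions; when every probe slot for a value is occupied, __addip_base() returns -EAGAIN and retry() grows and rehashes the table. The toy sketch below shows only the probing idea, with its own stand-in hash function and names (the real module uses jhash_1word() and the harray allocator).

/* Toy sketch of multi-probe hashing as used by the nethash type:
 * each set keeps `probes` random seeds; an element may live in any of
 * the `probes` slots derived from it, and insertion reports "full"
 * (grow + rehash in the real module) when all of them are occupied. */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_hash(uint32_t ip, uint32_t seed)
{
	/* stand-in for jhash_1word(); any decent mixer works for the sketch */
	uint32_t h = ip ^ seed;
	h ^= h >> 16; h *= 0x7feb352d;
	h ^= h >> 15; h *= 0x846ca68b;
	h ^= h >> 16;
	return h;
}

static int hash_add(uint32_t *slots, uint32_t hashsize,
		    const uint32_t *seeds, unsigned probes, uint32_t value)
{
	unsigned i;

	for (i = 0; i < probes; i++) {
		uint32_t id = toy_hash(value, seeds[i]) % hashsize;
		if (slots[id] == value)
			return -1;      /* -EEXIST in the kernel module */
		if (slots[id] == 0) {   /* 0 marks an empty slot, as in the module */
			slots[id] = value;
			return 0;
		}
	}
	return 1;                       /* all probes busy: trigger rehash (-EAGAIN) */
}

int main(void)
{
	uint32_t slots[8] = { 0 };
	uint32_t seeds[4] = { 0xdeadbeef, 0x12345678, 0x0badcafe, 0x87654321 };

	printf("%d\n", hash_add(slots, 8, seeds, 4, 0xc0a80101)); /* 0: inserted */
	printf("%d\n", hash_add(slots, 8, seeds, 4, 0xc0a80101)); /* -1: already present */
	return 0;
}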
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ip_set_portmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,346 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing a port set type as a bitmap */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_portmap.h>
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ struct iphdr *iph = ip_hdr(skb);
++#else
++ struct iphdr *iph = skb->nh.iph;
++#endif
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++
++static inline int
++__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_port, map->members);
++}
++
++static int
++testport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __testport(set, req->port, hash_port);
++}
++
++static int
++testport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res;
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
++ if (port == INVALID_PORT)
++ return 0;
++
++ res = __testport(set, port, hash_port);
++
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_port, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
++}
++
++static int
++addport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __addport(set, req->port, hash_port);
++}
++
++static int
++addport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __addport(set, port, hash_port);
++}
++
++static inline int
++__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++ if (!test_and_clear_bit(port - map->first_port, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
++}
++
++static int
++delport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __delport(set, req->port, hash_port);
++}
++
++static int
++delport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __delport(set, port, hash_port);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_portmap_create *req =
++ (struct ip_set_req_portmap_create *) data;
++ struct ip_set_portmap *map;
++
++ if (size != sizeof(struct ip_set_req_portmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u to %u", req->from, req->to);
++
++ if (req->from > req->to) {
++ DP("bad port range");
++ return -ENOEXEC;
++ }
++
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big (max %d ports)",
++ MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_portmap));
++ return -ENOMEM;
++ }
++ map->first_port = req->from;
++ map->last_port = req->to;
++ newbytes = bitmap_bytes(req->from, req->to);
++ map->members = kmalloc(newbytes, GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ kfree(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_req_portmap_create *header =
++ (struct ip_set_req_portmap_create *) data;
++
++ DP("list_header %u %u", map->first_port, map->last_port);
++
++ header->from = map->first_port;
++ header->to = map->last_port;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ return bitmap_bytes(map->first_port, map->last_port);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ int bytes = bitmap_bytes(map->first_port, map->last_port);
++
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_portmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_portmap),
++ .addip = &addport,
++ .addip_kernel = &addport_kernel,
++ .delip = &delport,
++ .delip_kernel = &delport_kernel,
++ .testip = &testport,
++ .testip_kernel = &testport_kernel,
++ .header_size = sizeof(struct ip_set_req_portmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
++
++static int __init ip_set_portmap_init(void)
++{
++ return ip_set_register_set_type(&ip_set_portmap);
++}
++
++static void __exit ip_set_portmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_portmap);
++}
++
++module_init(ip_set_portmap_init);
++module_exit(ip_set_portmap_fini);
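The portmap module above is a plain bitmap over the configured port range: a port maps to bit (port - first_port), and the allocation size comes from bitmap_bytes() in ip_set.h, which rounds the needed bytes up to a multiple of four. A standalone sketch of that layout, with hypothetical helper names:

/* Sketch of the portmap bitmap: one bit per port in [first, last],
 * sized with the same rounding as bitmap_bytes() in ip_set.h. */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

static size_t portmap_bytes(uint16_t first, uint16_t last)
{
	/* same formula as bitmap_bytes(): bytes rounded up to 4-byte words */
	return 4 * ((((last - first + 8) / 8) + 3) / 4);
}

static void port_set(uint8_t *bits, uint16_t first, uint16_t port)
{
	bits[(port - first) / 8] |= 1u << ((port - first) % 8);
}

static int port_test(const uint8_t *bits, uint16_t first, uint16_t last,
		     uint16_t port)
{
	if (port < first || port > last)
		return 0;
	return !!(bits[(port - first) / 8] & (1u << ((port - first) % 8)));
}

int main(void)
{
	uint16_t first = 1024, last = 2047;
	size_t n = portmap_bytes(first, last);
	uint8_t *bits = calloc(n, 1);

	if (!bits)
		return 1;
	printf("%zu bytes for %u ports\n", n, (unsigned)(last - first + 1)); /* 128 for 1024 */
	port_set(bits, first, 1234);
	printf("%d %d\n", port_test(bits, first, last, 1234),
			  port_test(bits, first, last, 1235));               /* 1 0 */
	free(bits);
	return 0;
}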
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ipt_set.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ipt_set.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ipt_set.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ipt_set.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,160 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module to match an IP set. */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static inline int
++match_set(const struct ipt_set_info *info,
++ const struct sk_buff *skb,
++ int inv)
++{
++ if (ip_set_testip_kernel(info->index, skb, info->flags))
++ inv = !inv;
++ return inv;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++ const void *matchinfo,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ int offset, unsigned int protoff, bool *hotdrop)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ int offset, unsigned int protoff, int *hotdrop)
++#else
++ int offset, int *hotdrop)
++#endif
++{
++ const struct ipt_set_info_match *info = matchinfo;
++
++ return match_set(&info->match_set,
++ skb,
++ info->match_set.flags[0] & IPSET_MATCH_INV);
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++bool
++#else
++static int
++#endif
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ const void *inf,
++#else
++ const struct ipt_ip *ip,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++ void *matchinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ unsigned int matchsize,
++#endif
++ unsigned int hook_mask)
++{
++ struct ipt_set_info_match *info =
++ (struct ipt_set_info_match *) matchinfo;
++ ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++ ip_set_printk("invalid matchsize %d", matchsize);
++ return 0;
++ }
++#endif
++
++ index = ip_set_get_byindex(info->match_set.index);
++
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("Cannot find set indentified by id %u to match",
++ info->match_set.index);
++ return 0; /* error */
++ }
++ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++ ip_set_printk("That's nasty!");
++ return 0; /* error */
++ }
++
++ return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ void *matchinfo, unsigned int matchsize)
++#else
++ void *matchinfo)
++#endif
++{
++ struct ipt_set_info_match *info = matchinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++ ip_set_printk("invalid matchsize %d", matchsize);
++ return;
++ }
++#endif
++ ip_set_put(info->match_set.index);
++}
++
++static struct ipt_match set_match = {
++ .name = "set",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ .family = AF_INET,
++#endif
++ .match = &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ .matchsize = sizeof(struct ipt_set_info_match),
++#endif
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set match module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_match xt_register_match
++#define ipt_unregister_match xt_unregister_match
++#endif
++
++static int __init ipt_ipset_init(void)
++{
++ return ipt_register_match(&set_match);
++}
++
++static void __exit ipt_ipset_fini(void)
++{
++ ipt_unregister_match(&set_match);
++}
++
++module_init(ipt_ipset_init);
++module_exit(ipt_ipset_fini);
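The set match above reduces to testing set membership and flipping the result when the inversion flag is given, as match_set() shows. A minimal standalone sketch of that inversion logic (illustration only, not kernel code):

/* Sketch of the inversion handling in match_set(): the result starts as
 * the inverted flag and is flipped when the packet is in the set, which
 * is equivalent to (in_set XOR inverted). */
#include <stdio.h>

static int match_result(int in_set, int inverted)
{
	int res = inverted;
	if (in_set)
		res = !res;
	return res;             /* same truth table as in_set ^ inverted */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       match_result(0, 0), match_result(1, 0),   /* 0 1 */
	       match_result(0, 1), match_result(1, 1));  /* 1 0 */
	return 0;
}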
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/ipt_SET.c linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ipt_SET.c
+--- ./linux-2.6.22.4/net/ipv4/netfilter/ipt_SET.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/ipt_SET.c 2007-10-12 14:31:55.000000000 +0200
+@@ -0,0 +1,172 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* ipt_SET.c - netfilter target to manipulate IP sets */
++
++#include <linux/types.h>
++#include <linux/ip.h>
++#include <linux/timer.h>
++#include <linux/module.h>
++#include <linux/netfilter.h>
++#include <linux/netdevice.h>
++#include <linux/if.h>
++#include <linux/inetdevice.h>
++#include <linux/version.h>
++#include <net/protocol.h>
++#include <net/checksum.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static unsigned int
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ const void *targinfo,
++ void *userinfo)
++#else
++ const void *targinfo)
++#endif
++{
++ const struct ipt_set_info_target *info = targinfo;
++
++ if (info->add_set.index != IP_SET_INVALID_ID)
++ ip_set_addip_kernel(info->add_set.index,
++ *pskb,
++ info->add_set.flags);
++ if (info->del_set.index != IP_SET_INVALID_ID)
++ ip_set_delip_kernel(info->del_set.index,
++ *pskb,
++ info->del_set.flags);
++
++ return IPT_CONTINUE;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ const void *e,
++#else
++ const struct ipt_entry *e,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++ void *targinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ unsigned int targinfosize,
++#endif
++ unsigned int hook_mask)
++{
++ struct ipt_set_info_target *info =
++ (struct ipt_set_info_target *) targinfo;
++ ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
++ DP("bad target info size %u", targinfosize);
++ return 0;
++ }
++#endif
++
++ if (info->add_set.index != IP_SET_INVALID_ID) {
++ index = ip_set_get_byindex(info->add_set.index);
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("cannot find add_set index %u as target",
++ info->add_set.index);
++ return 0; /* error */
++ }
++ }
++
++ if (info->del_set.index != IP_SET_INVALID_ID) {
++ index = ip_set_get_byindex(info->del_set.index);
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("cannot find del_set index %u as target",
++ info->del_set.index);
++ return 0; /* error */
++ }
++ }
++ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
++ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++ ip_set_printk("That's nasty!");
++ return 0; /* error */
++ }
++
++ return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ void *targetinfo, unsigned int targetsize)
++#else
++ void *targetinfo)
++#endif
++{
++ struct ipt_set_info_target *info = targetinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
++ ip_set_printk("invalid targetsize %d", targetsize);
++ return;
++ }
++#endif
++ if (info->add_set.index != IP_SET_INVALID_ID)
++ ip_set_put(info->add_set.index);
++ if (info->del_set.index != IP_SET_INVALID_ID)
++ ip_set_put(info->del_set.index);
++}
++
++static struct ipt_target SET_target = {
++ .name = "SET",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ .family = AF_INET,
++#endif
++ .target = target,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ .targetsize = sizeof(struct ipt_set_info_target),
++#endif
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set target module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_target xt_register_target
++#define ipt_unregister_target xt_unregister_target
++#endif
++
++static int __init ipt_SET_init(void)
++{
++ return ipt_register_target(&SET_target);
++}
++
++static void __exit ipt_SET_fini(void)
++{
++ ipt_unregister_target(&SET_target);
++}
++
++module_init(ipt_SET_init);
++module_exit(ipt_SET_fini);
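The SET target above pins the referenced sets for the lifetime of a rule: checkentry() takes a reference on each configured set via ip_set_get_byindex() and destroy() releases it with ip_set_put(), so a set cannot vanish while a rule still uses it. The sketch below illustrates that get/put pattern with hypothetical standalone types; it is not the kernel API.

/* Sketch of the reference pattern used by checkentry()/destroy():
 * take a reference when the rule is added, drop it when it is removed. */
#include <stdio.h>

struct toy_set {
	int refcount;
};

static int set_get(struct toy_set *s)
{
	if (!s)
		return -1;      /* analogous to the IP_SET_INVALID_ID case: reject */
	s->refcount++;
	return 0;
}

static void set_put(struct toy_set *s)
{
	if (s)
		s->refcount--;  /* set may be destroyed once no rule references it */
}

int main(void)
{
	struct toy_set add_set = { 0 };

	if (set_get(&add_set) == 0) {                   /* rule accepted */
		printf("ref=%d\n", add_set.refcount);   /* 1 */
		set_put(&add_set);                      /* rule removed */
	}
	printf("ref=%d\n", add_set.refcount);           /* 0 */
	return 0;
}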
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/Kconfig linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/Kconfig
+--- ./linux-2.6.22.4/net/ipv4/netfilter/Kconfig 2007-08-21 06:33:06.000000000 +0200
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/Kconfig 2007-10-12 14:31:55.000000000 +0200
+@@ -402,5 +402,122 @@
+ Allows altering the ARP packet payload: source and destination
+ hardware and network addresses.
+
++config IP_NF_SET
++ tristate "IP set support"
++ depends on INET && NETFILTER
++ help
++ This option adds IP set support to the kernel.
++ In order to define and use sets, you need the userspace utility
++ ipset(8).
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_MAX
++ int "Maximum number of IP sets"
++ default 256
++ range 2 65534
++ depends on IP_NF_SET
++ help
++ You can define here the default value of the maximum number
++ of IP sets for the kernel.
++
++ The value can be overridden by the 'max_sets' module
++ parameter of the 'ip_set' module.
++
++config IP_NF_SET_HASHSIZE
++ int "Hash size for bindings of IP sets"
++ default 1024
++ depends on IP_NF_SET
++ help
++ You can define here the default value of the hash size for
++ bindings of IP sets.
++
++ The value can be overridden by the 'hash_size' module
++ parameter of the 'ip_set' module.
++
++config IP_NF_SET_IPMAP
++ tristate "ipmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_MACIPMAP
++ tristate "macipmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the macipmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_PORTMAP
++ tristate "portmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the portmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPHASH
++ tristate "iphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_NETHASH
++ tristate "nethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the nethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTHASH
++ tristate "ipporthash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipporthash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPTREE
++ tristate "iptree set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iptree set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPTREEMAP
++ tristate "iptreemap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iptreemap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_MATCH_SET
++ tristate "set match support"
++ depends on IP_NF_SET
++ help
++ Set matching matches packets against the given IP sets.
++ You need the ipset utility to create and set up the sets.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_TARGET_SET
++ tristate "SET target support"
++ depends on IP_NF_SET
++ help
++ The SET target makes it possible to add/delete entries
++ in IP sets.
++ You need the ipset utility to create and set up the sets.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++
+ endmenu
+
+diff -Nru ./linux-2.6.22.4/net/ipv4/netfilter/Makefile linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/Makefile
+--- ./linux-2.6.22.4/net/ipv4/netfilter/Makefile 2007-08-21 06:33:06.000000000 +0200
++++ linux-2.6.22.4.pom2patch.set/net/ipv4/netfilter/Makefile 2007-10-12 14:31:55.000000000 +0200
+@@ -48,6 +48,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
++obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+
+ # targets
+@@ -62,6 +63,18 @@
+ obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
+ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
+ obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
++obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
++
++# sets
++obj-$(CONFIG_IP_NF_SET) += ip_set.o
++obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
++obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
++obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
++obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
++obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
++obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
+
+ # generic ARP tables
+ obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
+++ /dev/null
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,498 @@
-+#ifndef _IP_SET_H
-+#define _IP_SET_H
-+
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#if 0
-+#define IP_SET_DEBUG
-+#endif
-+
-+/*
-+ * A sockopt of such quality has hardly ever been seen before on the open
-+ * market! This little beauty, hardly ever used: above 64, so it's
-+ * traditionally used for firewalling, not touched (even once!) by the
-+ * 2.0, 2.2 and 2.4 kernels!
-+ *
-+ * Comes with its own certificate of authenticity, valid anywhere in the
-+ * Free world!
-+ *
-+ * Rusty, 19.4.2000
-+ */
-+#define SO_IP_SET 83
-+
-+/*
-+ * Heavily modify by Joakim Axelsson 08.03.2002
-+ * - Made it more modulebased
-+ *
-+ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
-+ * - bindings added
-+ * - in order to "deal with" backward compatibility, renamed to ipset
-+ */
-+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
-+ */
-+#define IP_SET_PROTOCOL_VERSION 2
-+
-+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
-+
-+/* Lets work with our own typedef for representing an IP address.
-+ * We hope to make the code more portable, possibly to IPv6...
-+ *
-+ * The representation works in HOST byte order, because most set types
-+ * will perform arithmetic operations and compare operations.
-+ *
-+ * For now the type is an uint32_t.
-+ *
-+ * Make sure to ONLY use the functions when translating and parsing
-+ * in order to keep the host byte order and make it more portable:
-+ * parse_ip()
-+ * parse_mask()
-+ * parse_ipandmask()
-+ * ip_tostring()
-+ * (Joakim: where are they???)
-+ */
-+
-+typedef uint32_t ip_set_ip_t;
-+
-+/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
-+ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
-+ */
-+typedef uint16_t ip_set_id_t;
-+
-+#define IP_SET_INVALID_ID 65535
-+
-+/* How deep we follow bindings */
-+#define IP_SET_MAX_BINDINGS 6
-+
-+/*
-+ * Option flags for kernel operations (ipt_set_info)
-+ */
-+#define IPSET_SRC 0x01 /* Source match/add */
-+#define IPSET_DST 0x02 /* Destination match/add */
-+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
-+
-+/*
-+ * Set features
-+ */
-+#define IPSET_TYPE_IP 0x01 /* IP address type of set */
-+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
-+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
-+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
-+
-+/* Reserved keywords */
-+#define IPSET_TOKEN_DEFAULT ":default:"
-+#define IPSET_TOKEN_ALL ":all:"
-+
-+/* SO_IP_SET operation constants, and their request struct types.
-+ *
-+ * Operation ids:
-+ * 0-99: commands with version checking
-+ * 100-199: add/del/test/bind/unbind
-+ * 200-299: list, save, restore
-+ */
-+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
-+ *
-+ * Sets are identified by name.
-+ */
-+
-+#define IP_SET_REQ_STD \
-+ unsigned op; \
-+ unsigned version; \
-+ char name[IP_SET_MAXNAMELEN]
-+
-+#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
-+struct ip_set_req_create {
-+ IP_SET_REQ_STD;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
-+struct ip_set_req_std {
-+ IP_SET_REQ_STD;
-+};
-+
-+#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
-+/* Uses ip_set_req_std */
-+
-+#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
-+/* Uses ip_set_req_create */
-+
-+#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
-+/* Uses ip_set_req_create */
-+
-+union ip_set_name_index {
-+ char name[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+};
-+
-+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
-+struct ip_set_req_get_set {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+};
-+
-+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
-+/* Uses ip_set_req_get_set */
-+
-+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
-+struct ip_set_req_version {
-+ unsigned op;
-+ unsigned version;
-+};
-+
-+/* Double shots operations:
-+ * add, del, test, bind and unbind.
-+ *
-+ * First we query the kernel to get the index and type of the target set,
-+ * then issue the command. Validity of IP is checked in kernel in order
-+ * to minimalize sockopt operations.
-+ */
-+
-+/* Get minimal set data for add/del/test/bind/unbind IP */
-+#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
-+struct ip_set_req_adt_get {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_REQ_BYINDEX \
-+ unsigned op; \
-+ ip_set_id_t index;
-+
-+struct ip_set_req_adt {
-+ IP_SET_REQ_BYINDEX;
-+};
-+
-+#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+struct ip_set_req_bind {
-+ IP_SET_REQ_BYINDEX;
-+ char binding[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
-+ * index = 0 means unbinding for all sets */
-+
-+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+
-+/* Multiple shots operations: list, save, restore.
-+ *
-+ * - check kernel version and query the max number of sets
-+ * - get the basic information on all sets
-+ * and size required for the next step
-+ * - get actual set data: header, data, bindings
-+ */
-+
-+/* Get max_sets and the index of a queried set
-+ */
-+#define IP_SET_OP_MAX_SETS 0x00000020
-+struct ip_set_req_max_sets {
-+ unsigned op;
-+ unsigned version;
-+ ip_set_id_t max_sets; /* max_sets */
-+ ip_set_id_t sets; /* real number of sets */
-+ union ip_set_name_index set; /* index of set if name used */
-+};
-+
-+/* Get the id and name of the sets plus size for next step */
-+#define IP_SET_OP_LIST_SIZE 0x00000201
-+#define IP_SET_OP_SAVE_SIZE 0x00000202
-+struct ip_set_req_setnames {
-+ unsigned op;
-+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
-+ /* followed by sets number of struct ip_set_name_list */
-+};
-+
-+struct ip_set_name_list {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ ip_set_id_t id;
-+};
-+
-+/* The actual list operation */
-+#define IP_SET_OP_LIST 0x00000203
-+struct ip_set_req_list {
-+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
-+};
-+
-+struct ip_set_list {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
-+};
-+
-+struct ip_set_hash_list {
-+ ip_set_ip_t ip;
-+ ip_set_id_t binding;
-+};
-+
-+/* The save operation */
-+#define IP_SET_OP_SAVE 0x00000204
-+/* Uses ip_set_req_list, in the reply replaced by
-+ * sets number of struct ip_set_save plus a marker
-+ * ip_set_save followed by ip_set_hash_save structures.
-+ */
-+struct ip_set_save {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+};
-+
-+/* At restoring, ip == 0 means default binding for the given set: */
-+struct ip_set_hash_save {
-+ ip_set_ip_t ip;
-+ ip_set_id_t id;
-+ ip_set_id_t binding;
-+};
-+
-+/* The restore operation */
-+#define IP_SET_OP_RESTORE 0x00000205
-+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
-+ * structures.
-+ */
-+struct ip_set_restore {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+};
-+
-+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
-+{
-+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
-+}
-+
-+#ifdef __KERNEL__
-+
-+#define ip_set_printk(format, args...) \
-+ do { \
-+ printk("%s: %s: ", __FILE__, __FUNCTION__); \
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+
-+#if defined(IP_SET_DEBUG)
-+#define DP(format, args...) \
-+ do { \
-+ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+#define IP_SET_ASSERT(x) \
-+ do { \
-+ if (!(x)) \
-+ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
-+ __FILE__, __LINE__, __FUNCTION__); \
-+ } while (0)
-+#else
-+#define DP(format, args...)
-+#define IP_SET_ASSERT(x)
-+#endif
-+
-+struct ip_set;
-+
-+/*
-+ * The ip_set_type definition - one per set type, e.g. "ipmap".
-+ *
-+ * Each individual set has a pointer, set->type, going to one
-+ * of these structures. Function pointers inside the structure implement
-+ * the real behaviour of the sets.
-+ *
-+ * If not mentioned differently, the implementation behind the function
-+ * pointers of a set_type, is expected to return 0 if ok, and a negative
-+ * errno (e.g. -EINVAL) on error.
-+ */
-+struct ip_set_type {
-+ struct list_head list; /* next in list of set types */
-+
-+ /* test for IP in set (kernel: iptables -m set src|dst)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* test for IP in set (userspace: ipset -T set IP)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /*
-+ * Size of the data structure passed by when
-+ * adding/deletin/testing an entry.
-+ */
-+ size_t reqsize;
-+
-+ /* Add IP into set (userspace: ipset -A set IP)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* remove IP from set (userspace: ipset -D set --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* new set creation - allocated type specific items
-+ */
-+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
-+
-+ /* retry the operation after successfully tweaking the set
-+ */
-+ int (*retry) (struct ip_set *set);
-+
-+ /* set destruction - free type specific items
-+ * There is no return value.
-+ * Can be called only when child sets are destroyed.
-+ */
-+ void (*destroy) (struct ip_set *set);
-+
-+ /* set flushing - reset all bits in the set, or something similar.
-+ * There is no return value.
-+ */
-+ void (*flush) (struct ip_set *set);
-+
-+ /* Listing: size needed for header
-+ */
-+ size_t header_size;
-+
-+ /* Listing: Get the header
-+ *
-+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set, therefore the length of "data" is always
-+ * correct.
-+ */
-+ void (*list_header) (const struct ip_set *set,
-+ void *data);
-+
-+ /* Listing: Get the size for the set members
-+ */
-+ int (*list_members_size) (const struct ip_set *set);
-+
-+ /* Listing: Get the set members
-+ *
-+ * Fill in the information in "data".
-+ * This function is always run after list_members_size() under a
-+ * writelock on the set, therefore the length of "data" is always
-+ * correct.
-+ */
-+ void (*list_members) (const struct ip_set *set,
-+ void *data);
-+
-+ char typename[IP_SET_MAXNAMELEN];
-+ unsigned char features;
-+ int protocol_version;
-+
-+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-+ struct module *me;
-+};
-+
-+extern int ip_set_register_set_type(struct ip_set_type *set_type);
-+extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
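/*
 * A minimal sketch (not from the patch) of the plumbing a separate
 * set-type module built on the interface above needs.  The "toy" name
 * and the empty callbacks are placeholders for illustration; a real
 * type must also fill in the add/del/test, listing and flush members
 * of struct ip_set_type.
 */
#include <linux/module.h>
#include <linux/netfilter_ipv4/ip_set.h>

static int toy_create(struct ip_set *set, const void *data, size_t size)
{
	set->data = NULL;	/* allocate the type-private state here */
	return 0;
}

static void toy_destroy(struct ip_set *set)
{
	/* free whatever toy_create() allocated */
}

static struct ip_set_type toy_type = {
	.typename		= "toy",
	.features		= 0,	/* set the real IPSET_* feature bits */
	.protocol_version	= IP_SET_PROTOCOL_VERSION,
	.create			= toy_create,
	.destroy		= toy_destroy,
	.me			= THIS_MODULE,
};

static int __init toy_init(void)
{
	return ip_set_register_set_type(&toy_type);
}

static void __exit toy_fini(void)
{
	ip_set_unregister_set_type(&toy_type);
}

module_init(toy_init);
module_exit(toy_fini);
MODULE_LICENSE("GPL");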
-+
-+/* A generic ipset */
-+struct ip_set {
-+ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
-+ rwlock_t lock; /* lock for concurrency control */
-+ ip_set_id_t id; /* set id for swapping */
-+ ip_set_id_t binding; /* default binding for the set */
-+ atomic_t ref; /* in kernel and in hash references */
-+ struct ip_set_type *type; /* the set type */
-+ void *data; /* pooltype specific data */
-+};
-+
-+/* Structure to bind set elements to sets */
-+struct ip_set_hash {
-+ struct list_head list; /* list of clashing entries in hash */
-+ ip_set_ip_t ip; /* ip from set */
-+ ip_set_id_t id; /* set id */
-+ ip_set_id_t binding; /* set we bind the element to */
-+};
-+
-+/* register and unregister set references */
-+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
-+
-+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern int ip_set_testip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
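/*
 * A minimal sketch (not from the patch) of how a netfilter module is
 * meant to drive the API above: resolve and reference the set from
 * process context, test packets against it, and drop the reference on
 * teardown.  The set name "clients", the my_* helpers and the use of
 * the IPSET_SRC flag are assumptions for illustration.
 */
static ip_set_id_t clients_index = IP_SET_INVALID_ID;

static int my_setup(void)
{
	/* takes a reference; may sleep, so not for packet context */
	clients_index = ip_set_get_byname("clients");
	return clients_index == IP_SET_INVALID_ID ? -1 : 0;
}

static int my_match_src(const struct sk_buff *skb)
{
	u_int32_t flags[IP_SET_MAX_BINDINGS + 1] = { IPSET_SRC, 0 };

	/* > 0 means the source address is in the set */
	return ip_set_testip_kernel(clients_index, skb, flags) > 0;
}

static void my_teardown(void)
{
	if (clients_index != IP_SET_INVALID_ID)
		ip_set_put(clients_index);	/* drop the reference */
}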
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_H*/
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_iphash.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,30 @@
-+#ifndef __IP_SET_IPHASH_H
-+#define __IP_SET_IPHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_iphash {
-+ ip_set_ip_t *members; /* the iphash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_iphash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_iphash {
-+ ip_set_ip_t ip;
-+};
-+
-+#endif /* __IP_SET_IPHASH_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_ipmap.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,56 @@
-+#ifndef __IP_SET_IPMAP_H
-+#define __IP_SET_IPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_ipmap {
-+ void *members; /* the ipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ ip_set_ip_t netmask; /* subnet netmask */
-+ ip_set_ip_t sizeid; /* size of set in IPs */
-+ ip_set_ip_t hosts; /* number of hosts in a subnet */
-+};
-+
-+struct ip_set_req_ipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_ipmap {
-+ ip_set_ip_t ip;
-+};
-+
-+unsigned int
-+mask_to_bits(ip_set_ip_t mask)
-+{
-+ unsigned int bits = 32;
-+ ip_set_ip_t maskaddr;
-+
-+ if (mask == 0xFFFFFFFF)
-+ return bits;
-+
-+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
-+ maskaddr <<= 1;
-+
-+ return bits;
-+}
-+
-+ip_set_ip_t
-+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
-+{
-+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
-+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
-+ mask <<= 1;
-+
-+ return mask;
-+}
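/*
 * Worked example (illustrative only, hypothetical values): the helpers
 * above convert between an inclusive host-order range and a netmask.
 */
static inline void ipmap_mask_example(void)
{
	unsigned int bits;
	/* 192.168.1.0 - 192.168.1.255 is an exact /24 */
	ip_set_ip_t mask = range_to_mask(0xC0A80100, 0xC0A801FF, &bits);

	/* here mask == 0xFFFFFF00, bits == 24 and mask_to_bits(mask) == 24;
	 * a range that is not an exact CIDR block drives the mask down to
	 * zero, which a caller can treat as "no valid netmask" */
	(void) mask;
}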
-+
-+#endif /* __IP_SET_IPMAP_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,34 @@
-+#ifndef __IP_SET_IPPORTHASH_H
-+#define __IP_SET_IPPORTHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+struct ip_set_ipporthash {
-+ ip_set_ip_t *members; /* the ipporthash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_ipporthash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_ipporthash {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_IPPORTHASH_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_iptree.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,40 @@
-+#ifndef __IP_SET_IPTREE_H
-+#define __IP_SET_IPTREE_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_iptreed {
-+ unsigned long expires[256]; /* x.x.x.ADDR */
-+};
-+
-+struct ip_set_iptreec {
-+ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
-+};
-+
-+struct ip_set_iptreeb {
-+ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
-+};
-+
-+struct ip_set_iptree {
-+ unsigned int timeout;
-+ unsigned int gc_interval;
-+#ifdef __KERNEL__
-+ uint32_t elements; /* number of elements */
-+ struct timer_list gc;
-+ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
-+#endif
-+};
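/*
 * Note (illustrative only): the tree is indexed by the four octets of
 * the host-order address, most significant octet at the top level.
 * The macro below is a hypothetical helper, not used by the patch.
 */
#define IPTREE_OCTETS(ip, a, b, c, d)		\
	do {					\
		(a) = ((ip) >> 24) & 0xFF;	\
		(b) = ((ip) >> 16) & 0xFF;	\
		(c) = ((ip) >> 8) & 0xFF;	\
		(d) = (ip) & 0xFF;		\
	} while (0)
/* a lookup then walks tree[a]->tree[b]->tree[c]->expires[d] */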
-+
-+struct ip_set_req_iptree_create {
-+ unsigned int timeout;
-+};
-+
-+struct ip_set_req_iptree {
-+ ip_set_ip_t ip;
-+ unsigned int timeout;
-+};
-+
-+#endif /* __IP_SET_IPTREE_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_macipmap.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,38 @@
-+#ifndef __IP_SET_MACIPMAP_H
-+#define __IP_SET_MACIPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
-+
-+/* general flags */
-+#define IPSET_MACIP_MATCHUNSET 1
-+
-+/* per ip flags */
-+#define IPSET_MACIP_ISSET 1
-+
-+struct ip_set_macipmap {
-+ void *members; /* the macipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap {
-+ ip_set_ip_t ip;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+struct ip_set_macip {
-+ unsigned short flags;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+#endif /* __IP_SET_MACIPMAP_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_malloc.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,116 @@
-+#ifndef _IP_SET_MALLOC_H
-+#define _IP_SET_MALLOC_H
-+
-+#ifdef __KERNEL__
-+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
-+
-+static inline void init_max_malloc_size(void)
-+{
-+#define CACHE(x) max_malloc_size = x;
-+#include <linux/kmalloc_sizes.h>
-+#undef CACHE
-+}
-+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
-+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
-+}
-+
-+struct harray {
-+ size_t max_elements;
-+ void *arrays[0];
-+};
-+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
-+{
-+ struct harray *harray;
-+ size_t max_elements, size, i, j;
-+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
-+
-+ if (typesize > max_malloc_size)
-+ return NULL;
-+
-+ max_elements = max_malloc_size/typesize;
-+ size = hashsize/max_elements;
-+ if (hashsize % max_elements)
-+ size++;
-+
-+ /* Last pointer signals end of arrays */
-+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
-+ flags);
-+
-+ if (!harray)
-+ return NULL;
-+
-+ for (i = 0; i < size - 1; i++) {
-+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, max_elements * typesize);
-+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
-+ flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
-+
-+ harray->max_elements = max_elements;
-+ harray->arrays[size] = NULL;
-+
-+ return (void *)harray;
-+
-+ undo:
-+ for (j = 0; j < i; j++) {
-+ kfree(harray->arrays[j]);
-+ }
-+ kfree(harray);
-+ return NULL;
-+}
-+
-+static inline void harray_free(void *h)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i] != NULL; i++)
-+ kfree(harray->arrays[i]);
-+ kfree(harray);
-+}
-+
-+static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i+1] != NULL; i++)
-+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
-+ (hashsize - i * harray->max_elements) * typesize);
-+}
-+
-+#define HARRAY_ELEM(h, type, which) \
-+({ \
-+ struct harray *__h = (struct harray *)(h); \
-+ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
-+ + (which)%(__h)->max_elements); \
-+})
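/*
 * A minimal sketch (not from the patch) of how the hash set types later
 * in this patch use the chunked allocator; the sizes are hypothetical.
 */
static inline void harray_example(void)
{
	size_t hashsize = 65536;
	void *members = harray_malloc(hashsize, sizeof(ip_set_ip_t),
				      GFP_KERNEL);
	ip_set_ip_t *slot;

	if (!members)
		return;

	/* HARRAY_ELEM() hides the chunking: it selects the right kmalloc'd
	 * chunk and the offset inside it for logical index 12345 */
	slot = HARRAY_ELEM(members, ip_set_ip_t *, 12345);
	*slot = 0;

	harray_flush(members, hashsize, sizeof(ip_set_ip_t));
	harray_free(members);
}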
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_MALLOC_H*/
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_nethash.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,55 @@
-+#ifndef __IP_SET_NETHASH_H
-+#define __IP_SET_NETHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_nethash {
-+ ip_set_ip_t *members; /* the nethash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_nethash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+};
-+
-+struct ip_set_req_nethash {
-+ ip_set_ip_t ip;
-+ unsigned char cidr;
-+};
-+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
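/*
 * Worked example (illustrative only, hypothetical values): pack() folds
 * the prefix length into the first address byte past the network part,
 * so networks of different sizes hash to distinct values:
 *
 *	pack(0xC0A80100, 24) == 0xC0A801FF	(192.168.1.0/24)
 *	pack(0xC0A80180, 25) == 0xC0A801FE	(192.168.1.128/25)
 */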
-+
-+#endif /* __IP_SET_NETHASH_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_portmap.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,25 @@
-+#ifndef __IP_SET_PORTMAP_H
-+#define __IP_SET_PORTMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+struct ip_set_portmap {
-+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
-+};
-+
-+struct ip_set_req_portmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_PORTMAP_H */
-Index: linux-2.6.23-rc6/include/linux/netfilter_ipv4/ipt_set.h
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/include/linux/netfilter_ipv4/ipt_set.h 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,21 @@
-+#ifndef _IPT_SET_H
-+#define _IPT_SET_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+struct ipt_set_info {
-+ ip_set_id_t index;
-+ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
-+};
-+
-+/* match info */
-+struct ipt_set_info_match {
-+ struct ipt_set_info match_set;
-+};
-+
-+struct ipt_set_info_target {
-+ struct ipt_set_info add_set;
-+ struct ipt_set_info del_set;
-+};
-+
-+#endif /*_IPT_SET_H*/
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,2001 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module for IP set management */
-+
-+#include <linux/version.h>
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+#include <linux/config.h>
-+#endif
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/kmod.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <asm/semaphore.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+
-+#define ASSERT_READ_LOCK(x)
-+#define ASSERT_WRITE_LOCK(x)
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+static struct list_head set_type_list; /* all registered sets */
-+static struct ip_set **ip_set_list; /* all individual sets */
-+static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
-+static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
-+static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
-+static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
-+static struct list_head *ip_set_hash; /* hash of bindings */
-+static unsigned int ip_set_hash_random; /* random seed */
-+
-+/*
-+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and is used at all other places
-+ * (set/SET netfilter modules, binding value, etc.)
-+ *
-+ * Userspace requests are serialized by ip_set_app_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
-+ * must obey the following rules:
-+ *
-+ * - kernel requests: read and write locking mandatory
-+ * - user requests: read locking optional, write locking mandatory
-+ */
-+
-+static inline void
-+__ip_set_get(ip_set_id_t index)
-+{
-+ atomic_inc(&ip_set_list[index]->ref);
-+}
-+
-+static inline void
-+__ip_set_put(ip_set_id_t index)
-+{
-+ atomic_dec(&ip_set_list[index]->ref);
-+}
-+
-+/*
-+ * Binding routines
-+ */
-+
-+static inline struct ip_set_hash *
-+__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ struct ip_set_hash *set_hash;
-+
-+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
-+ if (set_hash->id == id && set_hash->ip == ip)
-+ return set_hash;
-+
-+ return NULL;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ ASSERT_READ_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
-+ set_hash = __ip_set_find(key, id, ip);
-+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
-+}
-+
-+static inline void
-+__set_hash_del(struct ip_set_hash *set_hash)
-+{
-+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+
-+ __ip_set_put(set_hash->binding);
-+ list_del(&set_hash->list);
-+ kfree(set_hash);
-+}
-+
-+static int
-+ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ if (set_hash != NULL)
-+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+static int
-+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+ int ret = 0;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip), ip_set_list[binding]->name);
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ if (!set_hash) {
-+ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
-+ if (!set_hash) {
-+ ret = -ENOMEM;
-+ goto unlock;
-+ }
-+ INIT_LIST_HEAD(&set_hash->list);
-+ set_hash->id = id;
-+ set_hash->ip = ip;
-+ list_add(&set_hash->list, &ip_set_hash[key]);
-+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+ DP("overwrite binding: %s",
-+ ip_set_list[set_hash->binding]->name);
-+ __ip_set_put(set_hash->binding);
-+ }
-+ set_hash->binding = binding;
-+ __ip_set_get(set_hash->binding);
-+ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
-+ key, id, ip_set_list[id]->name,
-+ HIPQUAD(ip), binding, ip_set_list[binding]->name);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+#define FOREACH_HASH_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash; \
-+ \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+#define FOREACH_HASH_RW_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash, *__n; \
-+ \
-+ ASSERT_WRITE_LOCK(&ip_set_lock); \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+/* Add, del and test set entries from kernel */
-+
-+#define follow_bindings(index, set, ip) \
-+((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
-+ || (index = (set)->binding) != IP_SET_INVALID_ID)
-+
-+int
-+ip_set_testip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
-+ read_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return res;
-+}
-+
-+void
-+ip_set_addip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ retry:
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ if (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0)
-+ goto retry;
-+}
-+
-+void
-+ip_set_delip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+}
-+
-+/* Register and deregister settype */
-+
-+static inline struct ip_set_type *
-+find_set_type(const char *name)
-+{
-+ struct ip_set_type *set_type;
-+
-+ list_for_each_entry(set_type, &set_type_list, list)
-+ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
-+ return set_type;
-+ return NULL;
-+}
-+
-+int
-+ip_set_register_set_type(struct ip_set_type *set_type)
-+{
-+ int ret = 0;
-+
-+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
-+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
-+ set_type->typename,
-+ set_type->protocol_version,
-+ IP_SET_PROTOCOL_VERSION);
-+ return -EINVAL;
-+ }
-+
-+ write_lock_bh(&ip_set_lock);
-+ if (find_set_type(set_type->typename)) {
-+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
-+ set_type->typename);
-+ ret = -EINVAL;
-+ goto unlock;
-+ }
-+ if (!try_module_get(THIS_MODULE)) {
-+ ret = -EFAULT;
-+ goto unlock;
-+ }
-+ list_add(&set_type->list, &set_type_list);
-+ DP("'%s' registered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+void
-+ip_set_unregister_set_type(struct ip_set_type *set_type)
-+{
-+ write_lock_bh(&ip_set_lock);
-+ if (!find_set_type(set_type->typename)) {
-+ ip_set_printk("'%s' not registered?",
-+ set_type->typename);
-+ goto unlock;
-+ }
-+ list_del(&set_type->list);
-+ module_put(THIS_MODULE);
-+ DP("'%s' unregistered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+
-+}
-+
-+/*
-+ * Userspace routines
-+ */
-+
-+/*
-+ * Find set by name, reference it once. The reference makes sure the
-+ * thing pointed to does not go away under our feet. Drop the reference
-+ * later, using ip_set_put().
-+ */
-+ip_set_id_t
-+ip_set_get_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ down(&ip_set_app_mutex);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
-+ __ip_set_get(i);
-+ index = i;
-+ break;
-+ }
-+ }
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * Find set by index, reference it once. The reference makes sure the
-+ * thing pointed to does not go away under our feet. Drop the reference
-+ * later, using ip_set_put().
-+ */
-+ip_set_id_t
-+ip_set_get_byindex(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+
-+ if (index >= ip_set_max) {
-+ /* don't leak the mutex taken above */
-+ up(&ip_set_app_mutex);
-+ return IP_SET_INVALID_ID;
-+ }
-+
-+ if (ip_set_list[index])
-+ __ip_set_get(index);
-+ else
-+ index = IP_SET_INVALID_ID;
-+
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * If the given index refers to a valid set, decrement its
-+ * reference count by one. The caller shall not assume the index
-+ * to be valid after calling this function.
-+ */
-+void ip_set_put(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+ if (ip_set_list[index])
-+ __ip_set_put(index);
-+ up(&ip_set_app_mutex);
-+}
-+
-+/* Find a set by name or index */
-+static ip_set_id_t
-+ip_set_find_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
-+ index = i;
-+ break;
-+ }
-+ }
-+ return index;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_byindex(ip_set_id_t index)
-+{
-+ if (index >= ip_set_max || ip_set_list[index] == NULL)
-+ index = IP_SET_INVALID_ID;
-+
-+ return index;
-+}
-+
-+/*
-+ * Add, del, test, bind and unbind
-+ */
-+
-+static inline int
-+__ip_set_testip(struct ip_set *set,
-+ const void *data,
-+ size_t size,
-+ ip_set_ip_t *ip)
-+{
-+ int res;
-+
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip(set, data, size, ip);
-+ read_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+__ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ do {
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip(set, data, size, &ip);
-+ write_unlock_bh(&set->lock);
-+ } while (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+
-+ return __ip_set_addip(index,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt));
-+}
-+
-+static int
-+ip_set_delip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+ write_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+
-+ return (res > 0 ? -EEXIST : res);
-+}
-+
-+static int
-+ip_set_bindip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of a set */
-+ char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets as binding values are referenced */
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ set->binding = binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = ip_set_hash_add(set->id, ip, binding);
-+
-+ return res;
-+}
-+
-+#define FOREACH_SET_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __i; \
-+ struct ip_set *__set; \
-+ \
-+ for (__i = 0; __i < ip_set_max; __i++) { \
-+ __set = ip_set_list[__i]; \
-+ if (__set != NULL) \
-+ fn(__set , ##args); \
-+ } \
-+})
-+
-+static inline void
-+__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
-+{
-+ if (set_hash->id == id)
-+ __set_hash_del(set_hash);
-+}
-+
-+static inline void
-+__unbind_default(struct ip_set *set)
-+{
-+ if (set->binding != IP_SET_INVALID_ID) {
-+ /* Sets as binding values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ }
-+}
-+
-+static int
-+ip_set_unbindip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ DP("");
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ DP("%u %s", index, req_bind->binding);
-+ if (index == IP_SET_INVALID_ID) {
-+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_SET_DO(__unbind_default);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all bindings of all sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+ DP("unreachable reached!");
-+ return -EINVAL;
-+ }
-+
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of set */
-+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
-+
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets in hash values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all bindings */
-+
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+
-+ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
-+ if (res >= 0)
-+ res = ip_set_hash_del(set->id, ip);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testbind(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of set */
-+ char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = (set->binding == binding) ? -EEXIST : 0;
-+
-+ return res;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = (ip_set_find_in_hash(set->id, ip) == binding)
-+ ? -EEXIST : 0;
-+
-+ return res;
-+}
-+
-+static struct ip_set_type *
-+find_set_type_rlock(const char *typename)
-+{
-+ struct ip_set_type *type;
-+
-+ read_lock_bh(&ip_set_lock);
-+ type = find_set_type(typename);
-+ if (type == NULL)
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return type;
-+}
-+
-+static int
-+find_free_id(const char *name,
-+ ip_set_id_t *index,
-+ ip_set_id_t *id)
-+{
-+ ip_set_id_t i;
-+
-+ *id = IP_SET_INVALID_ID;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL) {
-+ if (*id == IP_SET_INVALID_ID)
-+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
-+ /* Name clash */
-+ return -EEXIST;
-+ }
-+ if (*id == IP_SET_INVALID_ID)
-+ /* No free slot remained */
-+ return -ERANGE;
-+ /* Check that index is usable as id (swapping) */
-+ check:
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && ip_set_list[i]->id == *id) {
-+ *id = i;
-+ goto check;
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Create a set
-+ */
-+static int
-+ip_set_create(const char *name,
-+ const char *typename,
-+ ip_set_id_t restore,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set;
-+ ip_set_id_t index = 0, id;
-+ int res = 0;
-+
-+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
-+ /*
-+ * First, and without any locks, allocate and initialize
-+ * a normal base set structure.
-+ */
-+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
-+ if (!set)
-+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ set->binding = IP_SET_INVALID_ID;
-+ atomic_set(&set->ref, 0);
-+
-+ /*
-+ * Next, take the &ip_set_lock, check that we know the type,
-+ * and take a reference on the type, to make sure it
-+ * stays available while constructing our new set.
-+ *
-+ * After referencing the type, we drop the &ip_set_lock,
-+ * and let the new set construction run without locks.
-+ */
-+ set->type = find_set_type_rlock(typename);
-+ if (set->type == NULL) {
-+ /* Try loading the module */
-+ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
-+ strcpy(modulename, "ip_set_");
-+ strcat(modulename, typename);
-+ DP("try to load %s", modulename);
-+ request_module(modulename);
-+ set->type = find_set_type_rlock(typename);
-+ }
-+ if (set->type == NULL) {
-+ ip_set_printk("no set type '%s', set '%s' not created",
-+ typename, name);
-+ res = -ENOENT;
-+ goto out;
-+ }
-+ if (!try_module_get(set->type->me)) {
-+ read_unlock_bh(&ip_set_lock);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ read_unlock_bh(&ip_set_lock);
-+
-+ /*
-+ * Without holding any locks, create private part.
-+ */
-+ res = set->type->create(set, data, size);
-+ if (res != 0)
-+ goto put_out;
-+
-+ /* BTW, res==0 here. */
-+
-+ /*
-+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
-+ * ip_set_list.
-+ */
-+ write_lock_bh(&ip_set_lock);
-+ if ((res = find_free_id(set->name, &index, &id)) != 0) {
-+ DP("no free id!");
-+ goto cleanup;
-+ }
-+
-+ /* Make sure restore gets the same index */
-+ if (restore != IP_SET_INVALID_ID && index != restore) {
-+ DP("Can't restore, sets are screwed up");
-+ res = -ERANGE;
-+ goto cleanup;
-+ }
-+
-+ /*
-+ * Finally! Add our shiny new set to the list, and be done.
-+ */
-+ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
-+ set->id = id;
-+ ip_set_list[index] = set;
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+
-+ cleanup:
-+ write_unlock_bh(&ip_set_lock);
-+ set->type->destroy(set);
-+ put_out:
-+ module_put(set->type->me);
-+ out:
-+ kfree(set);
-+ return res;
-+}
-+
-+/*
-+ * Destroy a given existing set
-+ */
-+static void
-+ip_set_destroy_set(ip_set_id_t index)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+
-+ IP_SET_ASSERT(set);
-+ DP("set: %s", set->name);
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ ip_set_list[index] = NULL;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ /* Must call it without holding any lock */
-+ set->type->destroy(set);
-+ module_put(set->type->me);
-+ kfree(set);
-+}
-+
-+/*
-+ * Destroy a set - or all sets
-+ * Sets must not be referenced/used.
-+ */
-+static int
-+ip_set_destroy(ip_set_id_t index)
-+{
-+ ip_set_id_t i;
-+
-+ /* ref modification always protected by the mutex */
-+ if (index != IP_SET_INVALID_ID) {
-+ if (atomic_read(&ip_set_list[index]->ref))
-+ return -EBUSY;
-+ ip_set_destroy_set(index);
-+ } else {
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && (atomic_read(&ip_set_list[i]->ref)))
-+ return -EBUSY;
-+ }
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ ip_set_destroy_set(i);
-+ }
-+ }
-+ return 0;
-+}
-+
-+static void
-+ip_set_flush_set(struct ip_set *set)
-+{
-+ DP("set: %s %u", set->name, set->id);
-+
-+ write_lock_bh(&set->lock);
-+ set->type->flush(set);
-+ write_unlock_bh(&set->lock);
-+}
-+
-+/*
-+ * Flush data in a set - or in all sets
-+ */
-+static int
-+ip_set_flush(ip_set_id_t index)
-+{
-+ if (index != IP_SET_INVALID_ID) {
-+ IP_SET_ASSERT(ip_set_list[index]);
-+ ip_set_flush_set(ip_set_list[index]);
-+ } else
-+ FOREACH_SET_DO(ip_set_flush_set);
-+
-+ return 0;
-+}
-+
-+/* Rename a set */
-+static int
-+ip_set_rename(ip_set_id_t index, const char *name)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_id_t i;
-+ int res = 0;
-+
-+ DP("set: %s to %s", set->name, name);
-+ write_lock_bh(&ip_set_lock);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
-+ res = -EEXIST;
-+ goto unlock;
-+ }
-+ }
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+}
-+
-+/*
-+ * Swap two sets so that name/index points to the other.
-+ * References are also swapped.
-+ */
-+static int
-+ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
-+{
-+ struct ip_set *from = ip_set_list[from_index];
-+ struct ip_set *to = ip_set_list[to_index];
-+ char from_name[IP_SET_MAXNAMELEN];
-+ u_int32_t from_ref;
-+
-+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artificial restriction. */
-+ if (from->type->features != to->type->features)
-+ return -ENOEXEC;
-+
-+ /* No magic here: ref munging protected by the mutex */
-+ write_lock_bh(&ip_set_lock);
-+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
-+ from_ref = atomic_read(&from->ref);
-+
-+ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
-+ atomic_set(&from->ref, atomic_read(&to->ref));
-+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
-+ atomic_set(&to->ref, from_ref);
-+
-+ ip_set_list[from_index] = to;
-+ ip_set_list[to_index] = from;
-+
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+/*
-+ * List set data
-+ */
-+
-+static inline void
-+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_list);
-+}
-+
-+static inline void
-+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_save);
-+}
-+
-+static inline void
-+__set_hash_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, void *data, int *used)
-+{
-+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
-+
-+ hash_list->ip = set_hash->ip;
-+ hash_list->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_list);
-+ }
-+}
-+
-+static int ip_set_list_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_list *set_list;
-+
-+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
-+
-+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_list) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_list);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_list->header_size = set->type->header_size;
-+ if (*used + set_list->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_list->index = index;
-+ set_list->binding = set->binding;
-+ set_list->ref = atomic_read(&set->ref);
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_list->header_size;
-+
-+ /* Get and ensure set specific members size */
-+ set_list->members_size = set->type->list_members_size(set);
-+ if (*used + set_list->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_list->members_size;
-+ read_unlock_bh(&set->lock);
-+
-+ /* Bindings */
-+
-+ /* Get and ensure set specific bindings size */
-+ set_list->bindings_size = 0;
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &set_list->bindings_size);
-+ if (*used + set_list->bindings_size > len)
-+ goto not_enough_mem;
-+
-+ /* Fill in set specific bindings data */
-+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+/*
-+ * Save sets
-+ */
-+static int ip_set_save_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set;
-+ struct ip_set_save *set_save;
-+
-+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_save);
-+
-+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
-+ data, data + *used);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_save->header_size = set->type->header_size;
-+ if (*used + set_save->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_save->index = index;
-+ set_save->binding = set->binding;
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_save->header_size;
-+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
-+ /* Get and ensure set specific members size */
-+ set_save->members_size = set->type->list_members_size(set);
-+ if (*used + set_save->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_save->members_size;
-+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+static inline void
-+__set_hash_save_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id,
-+ void *data,
-+ int *used,
-+ int len,
-+ int *res)
-+{
-+ if (*res == 0
-+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
-+ /* Ensure bindings size */
-+ if (*used + sizeof(struct ip_set_hash_save) > len) {
-+ *res = -ENOMEM;
-+ return;
-+ }
-+ hash_save->id = set_hash->id;
-+ hash_save->ip = set_hash->ip;
-+ hash_save->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_save);
-+ }
-+}
-+
-+static int ip_set_save_bindings(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ int res = 0;
-+ struct ip_set_save *set_save;
-+
-+ DP("used %u, len %u", *used, len);
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ return -ENOMEM;
-+
-+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
-+ set_save->index = IP_SET_INVALID_ID;
-+ set_save->header_size = 0;
-+ set_save->members_size = 0;
-+ *used += sizeof(struct ip_set_save);
-+
-+ DP("marker added used %u, len %u", *used, len);
-+ /* Fill in bindings data */
-+ if (index != IP_SET_INVALID_ID)
-+ /* Sets are identified by id in hash */
-+ index = ip_set_list[index]->id;
-+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
-+
-+ return res;
-+}
-+
-+/*
-+ * Restore sets
-+ */
-+static int ip_set_restore(void *data,
-+ int len)
-+{
-+ int res = 0;
-+ int line = 0, used = 0, members_size;
-+ struct ip_set *set;
-+ struct ip_set_hash_save *hash_save;
-+ struct ip_set_restore *set_restore;
-+ ip_set_id_t index;
-+
-+ /* Loop to restore sets */
-+ while (1) {
-+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
-+ /* Get and ensure header size */
-+ if (used + sizeof(struct ip_set_restore) > len)
-+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
-+ used += sizeof(struct ip_set_restore);
-+
-+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
-+ + set_restore->members_size > len)
-+ return line;
-+
-+ /* Check marker */
-+ if (set_restore->index == IP_SET_INVALID_ID) {
-+ line--;
-+ goto bindings;
-+ }
-+
-+ /* Try to create the set */
-+ DP("restore %s %s", set_restore->name, set_restore->typename);
-+ res = ip_set_create(set_restore->name,
-+ set_restore->typename,
-+ set_restore->index,
-+ data + used,
-+ set_restore->header_size);
-+
-+ if (res != 0)
-+ return line;
-+ used += set_restore->header_size;
-+
-+ index = ip_set_find_byindex(set_restore->index);
-+ DP("index %u, restore_index %u", index, set_restore->index);
-+ if (index != set_restore->index)
-+ return line;
-+ /* Try to restore members data */
-+ set = ip_set_list[index];
-+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
-+ while (members_size + set->type->reqsize <=
-+ set_restore->members_size) {
-+ line++;
-+ DP("members: %u, line %u", members_size, line);
-+ res = __ip_set_addip(index,
-+ data + used + members_size,
-+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
-+ return line;
-+ members_size += set->type->reqsize;
-+ }
-+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
-+ if (members_size != set_restore->members_size)
-+ return line++;
-+ used += set_restore->members_size;
-+ }
-+
-+ bindings:
-+ /* Loop to restore bindings */
-+ while (used < len) {
-+ line++;
-+
-+ DP("restore binding, line %u", line);
-+ /* Get and ensure size */
-+ if (used + sizeof(struct ip_set_hash_save) > len)
-+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
-+ used += sizeof(struct ip_set_hash_save);
-+
-+ /* hash_save->id is used to store the index */
-+ index = ip_set_find_byindex(hash_save->id);
-+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
-+ if (index != hash_save->id)
-+ return line;
-+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
-+ DP("corrupt binding set index %u", hash_save->binding);
-+ return line;
-+ }
-+ set = ip_set_list[hash_save->id];
-+ /* Null valued IP means default binding */
-+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
-+ hash_save->ip,
-+ hash_save->binding);
-+ else {
-+ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
-+ write_lock_bh(&ip_set_lock);
-+ set->binding = hash_save->binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+ DP("default binding: %u", set->binding);
-+ }
-+ if (res != 0)
-+ return line;
-+ }
-+ if (used != len)
-+ return line;
-+
-+ return 0;
-+}
-+
-+static int
-+ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
-+{
-+ void *data;
-+ int res = 0; /* Assume OK */
-+ unsigned *op;
-+ struct ip_set_req_adt *req_adt;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
-+ struct fn_table {
-+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
-+ } adtfn_table[] =
-+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
-+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
-+ };
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (len <= sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >%zu, got %u)",
-+ sizeof(unsigned), len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(len);
-+ if (!data) {
-+ DP("out of mem for %u bytes", len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *)data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
-+ if (len < sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+ res = ip_set_create(req_create->name,
-+ req_create->typename,
-+ IP_SET_INVALID_ID,
-+ data + sizeof(struct ip_set_req_create),
-+ len - sizeof(struct ip_set_req_create));
-+ goto done;
-+ }
-+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
-+ /* Destroy all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_destroy->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+
-+ res = ip_set_destroy(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_flush->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = ip_set_flush(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_rename->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_rename(index, req_rename->typename);
-+ goto done;
-+ }
-+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
-+ ip_set_id_t to_index;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid SWAP data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_swap->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ to_index = ip_set_find_byname(req_swap->typename);
-+ if (to_index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_swap(index, to_index);
-+ goto done;
-+ }
-+ default:
-+ break; /* Set identified by id */
-+ }
-+
-+ /* There we may have add/del/test/bind/unbind/test_bind operations */
-+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
-+ res = -EBADMSG;
-+ goto done;
-+ }
-+ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
-+
-+ if (len < sizeof(struct ip_set_req_adt)) {
-+ ip_set_printk("short data in adt request (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_adt), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_adt = (struct ip_set_req_adt *) data;
-+
-+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
-+ && req_adt->index == IP_SET_INVALID_ID)) {
-+ index = ip_set_find_byindex(req_adt->index);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = adtfn(index, data, len);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static int
-+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
-+{
-+ int res = 0;
-+ unsigned *op;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ void *data;
-+ int copylen = *len;
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, *len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (*len < sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >=%zu, got %d)",
-+ sizeof(unsigned), *len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(*len);
-+ if (!data) {
-+ DP("out of mem for %d bytes", *len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, *len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *) data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_version)) {
-+ ip_set_printk("invalid VERSION (want %zu, got %d)",
-+ sizeof(struct ip_set_req_version),
-+ *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_version->version = IP_SET_PROTOCOL_VERSION;
-+ res = copy_to_user(user, req_version,
-+ sizeof(struct ip_set_req_version));
-+ goto done;
-+ }
-+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ req_get->set.index = index;
-+ goto copy;
-+ }
-+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byindex(req_get->set.index);
-+ strncpy(req_get->set.name,
-+ index == IP_SET_INVALID_ID ? ""
-+ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
-+ goto copy;
-+ }
-+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_adt_get)) {
-+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
-+ sizeof(struct ip_set_req_adt_get), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ if (index != IP_SET_INVALID_ID) {
-+ req_get->set.index = index;
-+ strncpy(req_get->typename,
-+ ip_set_list[index]->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ } else {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
-+ ip_set_id_t i;
-+
-+ if (*len != sizeof(struct ip_set_req_max_sets)) {
-+ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
-+ sizeof(struct ip_set_req_max_sets), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
-+ req_max_sets->set.index = IP_SET_INVALID_ID;
-+ } else {
-+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
-+ ip_set_find_byname(req_max_sets->set.name);
-+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ req_max_sets->max_sets = ip_set_max;
-+ req_max_sets->sets = 0;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ req_max_sets->sets++;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST_SIZE:
-+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
-+ struct ip_set_name_list *name_list;
-+ struct ip_set *set;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)) {
-+ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_setnames), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_setnames->size = 0;
-+ used = sizeof(struct ip_set_req_setnames);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL)
-+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
-+ used += sizeof(struct ip_set_name_list);
-+ if (used > copylen) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ set = ip_set_list[i];
-+ /* Fill in index, name, etc. */
-+ name_list->index = i;
-+ name_list->id = set->id;
-+ strncpy(name_list->name,
-+ set->name,
-+ IP_SET_MAXNAMELEN - 1);
-+ strncpy(name_list->typename,
-+ set->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ DP("filled %s of type %s, index %u\n",
-+ name_list->name, name_list->typename,
-+ name_list->index);
-+ if (!(req_setnames->index == IP_SET_INVALID_ID
-+ || req_setnames->index == i))
-+ continue;
-+ /* Update size */
-+ switch (*op) {
-+ case IP_SET_OP_LIST_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_list)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ case IP_SET_OP_SAVE_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_save)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ FOREACH_HASH_DO(__set_hash_bindings_size_save,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ default:
-+ break;
-+ }
-+ }
-+ if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short LIST (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_list->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* List all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_list_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* List an individual set */
-+ res = ip_set_list_set(index, data, &used, *len);
-+ }
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short SAVE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_save->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_save_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* Save an individual set */
-+ res = ip_set_save_set(index, data, &used, *len);
-+ }
-+ if (res == 0)
-+ res = ip_set_save_bindings(index, data, &used, *len);
-+
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
-+ int line;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)
-+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
-+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
-+ if (line != 0) {
-+ res = -EAGAIN;
-+ req_restore->size = line;
-+ copylen = sizeof(struct ip_set_req_setnames);
-+ goto copy;
-+ }
-+ goto done;
-+ }
-+ default:
-+ res = -EBADMSG;
-+ goto done;
-+ } /* end of switch(op) */
-+
-+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
-+ && ip_set_list[index]
-+ ? ip_set_list[index]->name
-+ : ":all:", copylen);
-+ res = copy_to_user(user, data, copylen);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static struct nf_sockopt_ops so_set = {
-+ .pf = PF_INET,
-+ .set_optmin = SO_IP_SET,
-+ .set_optmax = SO_IP_SET + 1,
-+ .set = &ip_set_sockfn_set,
-+ .get_optmin = SO_IP_SET,
-+ .get_optmax = SO_IP_SET + 1,
-+ .get = &ip_set_sockfn_get,
-+ .use = 0
-+};
-+
-+static int max_sets, hash_size;
-+module_param(max_sets, int, 0600);
-+MODULE_PARM_DESC(max_sets, "maximal number of sets");
-+module_param(hash_size, int, 0600);
-+MODULE_PARM_DESC(hash_size, "hash size for bindings");
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("module implementing core IP set support");
-+
-+static int __init init(void)
-+{
-+ int res;
-+ ip_set_id_t i;
-+
-+ get_random_bytes(&ip_set_hash_random, 4);
-+ if (max_sets)
-+ ip_set_max = max_sets;
-+ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
-+ if (!ip_set_list) {
-+ printk(KERN_ERR "Unable to create ip_set_list\n");
-+ return -ENOMEM;
-+ }
-+ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
-+ if (hash_size)
-+ ip_set_bindings_hash_size = hash_size;
-+ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
-+ if (!ip_set_hash) {
-+ printk(KERN_ERR "Unable to create ip_set_hash\n");
-+ vfree(ip_set_list);
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < ip_set_bindings_hash_size; i++)
-+ INIT_LIST_HEAD(&ip_set_hash[i]);
-+
-+ INIT_LIST_HEAD(&set_type_list);
-+
-+ res = nf_register_sockopt(&so_set);
-+ if (res != 0) {
-+ ip_set_printk("SO_SET registry failed: %d", res);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ return res;
-+ }
-+ return 0;
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* There can't be any existing set or binding */
-+ nf_unregister_sockopt(&so_set);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ DP("these are the famous last words");
-+}
-+
-+EXPORT_SYMBOL(ip_set_register_set_type);
-+EXPORT_SYMBOL(ip_set_unregister_set_type);
-+
-+EXPORT_SYMBOL(ip_set_get_byname);
-+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
-+
-+EXPORT_SYMBOL(ip_set_addip_kernel);
-+EXPORT_SYMBOL(ip_set_delip_kernel);
-+EXPORT_SYMBOL(ip_set_testip_kernel);
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_iphash.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_iphash.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,413 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip hash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_iphash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut when testing - there may be
-+ * deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements > limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id(set, ip, hash_ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->netmask = map->netmask;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iphash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_ipmap.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_ipmap.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,327 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the single bitmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_ipmap.h>
-+
-+static inline ip_set_ip_t
-+ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
-+{
-+ return (ip - map->first_ip)/map->hosts;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
-+
-+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->netmask = req->netmask;
-+
-+ if (req->netmask == 0xFFFFFFFF) {
-+ map->hosts = 1;
-+ map->sizeid = map->last_ip - map->first_ip + 1;
-+ } else {
-+ unsigned int mask_bits, netmask_bits;
-+ ip_set_ip_t mask;
-+
-+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
-+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
-+ netmask_bits = mask_to_bits(map->netmask);
-+
-+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
-+ || netmask_bits <= mask_bits) {
-+ /* Don't leak the freshly allocated map on a bad range/netmask */
-+ kfree(map);
-+ return -ENOEXEC;
-+ }
-+
-+ DP("mask_bits %u, netmask_bits %u",
-+ mask_bits, netmask_bits);
-+ map->hosts = 2 << (32 - netmask_bits - 1);
-+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
-+ }
-+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
-+ return -ENOEXEC;
-+ }
-+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+ header->netmask = map->netmask;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_ipporthash.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_ipporthash.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,535 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip+port hash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = ip_hdr(skb);
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdrlen(skb), &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdrlen(skb), &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = HASH_IP(map, ip, port);
-+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut when testing - there may be
-+ * deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (map->elements > limit)
-+ return -ERANGE;
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = HASH_IP(map, ip, port);
-+
-+ return __add_haship(map, *hash_ip);
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ id = hash_id(set, ip, port, hash_ip);
-+
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_iptree.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_iptree.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,571 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the iptree type */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
-+#include <linux/netfilter_ipv4/ip_set_iptree.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* Garbage collection interval in seconds: */
-+#define IPTREE_GC_TIME 5*60
-+/* Milliseconds to sleep before retrying to delete
-+ * the gc timer when destroying/flushing a set */
-+#define IPTREE_DESTROY_SLEEP 100
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
-+
-+#define ABCD(a,b,c,d,addrp) do { \
-+ a = ((unsigned char *)addrp)[3]; \
-+ b = ((unsigned char *)addrp)[2]; \
-+ c = ((unsigned char *)addrp)[1]; \
-+ d = ((unsigned char *)addrp)[0]; \
-+} while (0)
-+
-+#define TESTIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return 0; \
-+} while (0)
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
-+ TESTIP_WALK(map, a, btree);
-+ TESTIP_WALK(btree, b, ctree);
-+ TESTIP_WALK(ctree, c, dtree);
-+ DP("%lu %lu", dtree->expires[d], jiffies);
-+ return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
-+ : dtree->expires[d]);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
-+
-+#define ADDIP_WALK(map, elem, branch, type, cachep, flags) do { \
-+ if ((map)->tree[elem]) { \
-+ DP("found %u", elem); \
-+ branch = (map)->tree[elem]; \
-+ } else { \
-+ branch = (type *) \
-+ kmem_cache_alloc(cachep, flags); \
-+ if (branch == NULL) \
-+ return -ENOMEM; \
-+ memset(branch, 0, sizeof(*branch)); \
-+ (map)->tree[elem] = branch; \
-+ DP("alloc %u", elem); \
-+ } \
-+} while (0)
-+
-+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip,
-+ unsigned int __nocast flags)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+ int ret = 0;
-+
-+ if (!ip || map->elements > limit)
-+ /* We could call the garbage collector
-+ * but it's probably overkill */
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
-+ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep, flags);
-+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep, flags);
-+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep, flags);
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ ret = -EEXIST;
-+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
-+ /* Lottery: an expiry value of 0 would mean "unset", so bump it to 1 */
-+ if (dtree->expires[d] == 0)
-+ dtree->expires[d] = 1;
-+ DP("%u %lu", d, dtree->expires[d]);
-+ if (ret == 0)
-+ map->elements++;
-+ return ret;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ map->timeout,
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
-+
-+#define DELIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return -EEXIST; \
-+} while (0)
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DELIP_WALK(map, a, btree);
-+ DELIP_WALK(btree, b, ctree);
-+ DELIP_WALK(ctree, c, dtree);
-+
-+ if (dtree->expires[d]) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ return 0;
-+ }
-+ return -EEXIST;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+#define LOOP_WALK_BEGIN(map, i, branch) \
-+ for (i = 0; i < 256; i++) { \
-+ if (!(map)->tree[i]) \
-+ continue; \
-+ branch = (map)->tree[i]
-+
-+#define LOOP_WALK_END }
-+
-+static void ip_tree_gc(unsigned long ul_set)
-+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned char i,j,k;
-+
-+ i = j = k = 0;
-+ DP("gc: %s", set->name);
-+ write_lock_bh(&set->lock);
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]) {
-+ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
-+ a, b, c, d,
-+ dtree->expires[d], jiffies);
-+ if (map->timeout
-+ && time_before(dtree->expires[d], jiffies)) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ } else
-+ k = 1;
-+ }
-+ }
-+ if (k == 0) {
-+ DP("gc: %s: leaf %u %u %u empty",
-+ set->name, a, b, c);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ ctree->tree[c] = NULL;
-+ } else {
-+ DP("gc: %s: leaf %u %u %u not empty",
-+ set->name, a, b, c);
-+ j = 1;
-+ k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (j == 0) {
-+ DP("gc: %s: branch %u %u empty",
-+ set->name, a, b);
-+ kmem_cache_free(branch_cachep, ctree);
-+ btree->tree[b] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u %u not empty",
-+ set->name, a, b);
-+ i = 1;
-+ j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (i == 0) {
-+ DP("gc: %s: branch %u empty",
-+ set->name, a);
-+ kmem_cache_free(branch_cachep, btree);
-+ map->tree[a] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u not empty",
-+ set->name, a);
-+ i = j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ write_unlock_bh(&set->lock);
-+
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static inline void init_gc_timer(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ /* Even if there is no timeout for the entries,
-+ * we still have to call gc, because deleting
-+ * entries does not clean up empty branches */
-+ map->gc_interval = IPTREE_GC_TIME;
-+ init_timer(&map->gc);
-+ map->gc.data = (unsigned long) set;
-+ map->gc.function = ip_tree_gc;
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
-+ struct ip_set_iptree *map;
-+
-+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iptree));
-+ return -ENOMEM;
-+ }
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = req->timeout;
-+ map->elements = 0;
-+ set->data = map;
-+
-+ init_gc_timer(set);
-+
-+ return 0;
-+}
-+
-+static void __flush(struct ip_set_iptree *map)
-+{
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, ctree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, btree);
-+ LOOP_WALK_END;
-+ map->elements = 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ kfree(map);
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ unsigned int timeout = map->timeout;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = timeout;
-+
-+ init_gc_timer(set);
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
-+
-+ header->timeout = map->timeout;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned int count = 0;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ count++;
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+
-+ DP("members %u", count);
-+ return (count * sizeof(struct ip_set_req_iptree));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ size_t offset = 0;
-+ struct ip_set_req_iptree *entry;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
-+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
-+ : (dtree->expires[d] - jiffies)/HZ;
-+ offset += sizeof(struct ip_set_req_iptree);
-+ }
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+}
-+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptree type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ int ret;
-+
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+ if (!branch_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+ if (!leaf_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
-+ ret = -ENOMEM;
-+ goto free_branch;
-+ }
-+ ret = ip_set_register_set_type(&ip_set_iptree);
-+ if (ret == 0)
-+ goto out;
-+
-+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
-+ kmem_cache_destroy(branch_cachep);
-+ out:
-+ return ret;
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iptree);
-+ kmem_cache_destroy(leaf_cachep);
-+ kmem_cache_destroy(branch_cachep);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_macipmap.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_macipmap.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,353 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the macipmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->ip < map->first_ip || req->ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = req->ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
-+ return (memcmp(req->ethernet,
-+ &table[req->ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0);
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(ip_hdr(skb)->saddr),
-+ NIPQUAD(ip_hdr(skb)->daddr));
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return 0;
-+
-+ *hash_ip = ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
-+ /* Is mac pointer valid?
-+ * If so, compare... */
-+ return (skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+ && (memcmp(eth_hdr(skb)->h_source,
-+ &table[ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0));
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+/* returns 0 on success */
-+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
-+ return 0;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+
-+ if (!(skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
-+ return -EINVAL;
-+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
-+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+ header->flags = map->flags;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("macipmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_nethash.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_nethash.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,481 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a cidr nethash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_nethash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = pack(ip, cidr);
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ __u32 id = UINT_MAX;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
-+ if (id != UINT_MAX)
-+ break;
-+ }
-+ return id;
-+}
-+
-+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements > limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
-+
-+ return ret;
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+ ? ip_hdr(skb)->saddr
-+ : ip_hdr(skb)->daddr);
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("nethash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_portmap.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ip_set_portmap.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,334 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a port set type as a bitmap */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_portmap.h>
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = ip_hdr(skb);
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+
-+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
-+}
-+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testport(set, req->port, hash_port);
-+}
-+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ res = __testport(set, port, hash_port);
-+
-+ return (res < 0 ? 0 : res);
-+}
-+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addport(set, req->port, hash_port);
-+}
-+
-+static int
-+addport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
-+}
-+
-+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delport(set, req->port, hash_port);
-+}
-+
-+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ipt_set.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ipt_set.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,150 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module to match an IP set. */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static inline int
-+match_set(const struct ipt_set_info *info,
-+ const struct sk_buff *skb,
-+ int inv)
-+{
-+ if (ip_set_testip_kernel(info->index, skb, info->flags))
-+ inv = !inv;
-+ return inv;
-+}
-+
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
-+#endif
-+{
-+ const struct ipt_set_info_match *info = matchinfo;
-+
-+ return match_set(&info->match_set,
-+ skb,
-+ info->match_set.flags[0] & IPSET_MATCH_INV);
-+}
-+
-+static int
-+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
-+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int matchsize,
-+#endif
-+ unsigned int hook_mask)
-+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return 0;
-+ }
-+#endif
-+
-+ index = ip_set_get_byindex(info->match_set.index);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("Cannot find set indentified by id %u to match",
-+ info->match_set.index);
-+ return 0; /* error */
-+ }
-+ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
-+ void *matchinfo)
-+#endif
-+{
-+ struct ipt_set_info_match *info = matchinfo;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return;
-+ }
-+#endif
-+ ip_set_put(info->match_set.index);
-+}
-+
-+static struct ipt_match set_match = {
-+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+ .family = AF_INET,
-+#endif
-+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
-+ .checkentry = &checkentry,
-+ .destroy = &destroy,
-+ .me = THIS_MODULE
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set match module");
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
-+static int __init ipt_ipset_init(void)
-+{
-+ return ipt_register_match(&set_match);
-+}
-+
-+static void __exit ipt_ipset_fini(void)
-+{
-+ ipt_unregister_match(&set_match);
-+}
-+
-+module_init(ipt_ipset_init);
-+module_exit(ipt_ipset_fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/ipt_SET.c
-===================================================================
---- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/ipt_SET.c 2007-09-21 16:24:01.000000000 +0800
-@@ -0,0 +1,169 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* ipt_SET.c - netfilter target to manipulate IP sets */
-+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
-+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
-+#include <linux/version.h>
-+#include <linux/skbuff.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
-+#include <linux/netfilter_ipv4.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static unsigned int
-+target(struct sk_buff **pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ const void *targinfo,
-+ void *userinfo)
-+#else
-+ const void *targinfo)
-+#endif
-+{
-+ const struct ipt_set_info_target *info = targinfo;
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_addip_kernel(info->add_set.index,
-+ *pskb,
-+ info->add_set.flags);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_delip_kernel(info->del_set.index,
-+ *pskb,
-+ info->del_set.flags);
-+
-+ return IPT_CONTINUE;
-+}
-+
-+static int
-+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
-+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int targinfosize,
-+#endif
-+ unsigned int hook_mask)
-+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
-+ DP("bad target info size %u", targinfosize);
-+ return 0;
-+ }
-+#endif
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->add_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find add_set index %u as target",
-+ info->add_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+
-+ if (info->del_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->del_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find del_set index %u as target",
-+ info->del_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
-+ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
-+ void *targetinfo)
-+#endif
-+{
-+ struct ipt_set_info_target *info = targetinfo;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
-+ ip_set_printk("invalid targetsize %d", targetsize);
-+ return;
-+ }
-+#endif
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
-+}
-+
-+static struct ipt_target SET_target = {
-+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+ .family = AF_INET,
-+#endif
-+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
-+ .checkentry = checkentry,
-+ .destroy = destroy,
-+ .me = THIS_MODULE
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set target module");
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
-+static int __init ipt_SET_init(void)
-+{
-+ return ipt_register_target(&SET_target);
-+}
-+
-+static void __exit ipt_SET_fini(void)
-+{
-+ ipt_unregister_target(&SET_target);
-+}
-+
-+module_init(ipt_SET_init);
-+module_exit(ipt_SET_fini);
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/Kconfig
-===================================================================
---- linux-2.6.23-rc6.orig/net/ipv4/netfilter/Kconfig 2007-09-21 16:24:00.000000000 +0800
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/Kconfig 2007-09-21 16:24:01.000000000 +0800
-@@ -426,5 +426,114 @@
- Allows altering the ARP packet payload: source and destination
- hardware and network addresses.
-
-+config IP_NF_SET
-+ tristate "IP set support"
-+ depends on INET && NETFILTER
-+ help
-+ This option adds IP set support to the kernel.
-+ In order to define and use sets, you need the userspace utility
-+ ipset(8).
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MAX
-+ int "Maximum number of IP sets"
-+ default 256
-+ range 2 65534
-+ depends on IP_NF_SET
-+ help
-+ You can define here default value of the maximum number
-+ of IP sets for the kernel.
-+
-+ The value can be overriden by the 'max_sets' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_HASHSIZE
-+ int "Hash size for bindings of IP sets"
-+ default 1024
-+ depends on IP_NF_SET
-+ help
-+ You can define here default value of the hash size for
-+ bindings of IP sets.
-+
-+ The value can be overriden by the 'hash_size' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_IPMAP
-+ tristate "ipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MACIPMAP
-+ tristate "macipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the macipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_PORTMAP
-+ tristate "portmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the portmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPHASH
-+ tristate "iphash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iphash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_NETHASH
-+ tristate "nethash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the nethash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPPORTHASH
-+ tristate "ipporthash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipporthash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPTREE
-+ tristate "iptree set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iptree set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_MATCH_SET
-+ tristate "set match support"
-+ depends on IP_NF_SET
-+ help
-+ Set matching matches against given IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_TARGET_SET
-+ tristate "SET target support"
-+ depends on IP_NF_SET
-+ help
-+ The SET target makes possible to add/delete entries
-+ in IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+
- endmenu
-
-Index: linux-2.6.23-rc6/net/ipv4/netfilter/Makefile
-===================================================================
---- linux-2.6.23-rc6.orig/net/ipv4/netfilter/Makefile 2007-09-21 16:24:00.000000000 +0800
-+++ linux-2.6.23-rc6/net/ipv4/netfilter/Makefile 2007-09-21 16:24:01.000000000 +0800
-@@ -48,6 +48,7 @@
- obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
- obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
- obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-+obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
- obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
- obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
- obj-$(CONFIG_IP_NF_MATCH_LAYER7) += ipt_layer7.o
-@@ -64,6 +65,17 @@
- obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
- obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
- obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
-+obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
-+
-+# sets
-+obj-$(CONFIG_IP_NF_SET) += ip_set.o
-+obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
-+obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
-+obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
-+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
-+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
-+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
-+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
-
- # generic ARP tables
- obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
--- /dev/null
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,498 @@
++#ifndef _IP_SET_H
++#define _IP_SET_H
++
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#if 0
++#define IP_SET_DEBUG
++#endif
++
++/*
++ * A sockopt of such quality has hardly ever been seen before on the open
++ * market! This little beauty, hardly ever used: above 64, so it's
++ * traditionally used for firewalling, not touched (even once!) by the
++ * 2.0, 2.2 and 2.4 kernels!
++ *
++ * Comes with its own certificate of authenticity, valid anywhere in the
++ * Free world!
++ *
++ * Rusty, 19.4.2000
++ */
++#define SO_IP_SET 83
++
++/*
++ * Heavily modify by Joakim Axelsson 08.03.2002
++ * - Made it more modulebased
++ *
++ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
++ * - bindings added
++ * - in order to "deal with" backward compatibility, renamed to ipset
++ */
++
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
++ */
++#define IP_SET_PROTOCOL_VERSION 2
++
++#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
++
++/* Lets work with our own typedef for representing an IP address.
++ * We hope to make the code more portable, possibly to IPv6...
++ *
++ * The representation works in HOST byte order, because most set types
++ * will perform arithmetic operations and compare operations.
++ *
++ * For now the type is an uint32_t.
++ *
++ * Make sure to ONLY use the functions when translating and parsing
++ * in order to keep the host byte order and make it more portable:
++ * parse_ip()
++ * parse_mask()
++ * parse_ipandmask()
++ * ip_tostring()
++ * (Joakim: where are they???)
++ */
++
++typedef uint32_t ip_set_ip_t;
++
++/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
++ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
++ */
++typedef uint16_t ip_set_id_t;
++
++#define IP_SET_INVALID_ID 65535
++
++/* How deep we follow bindings */
++#define IP_SET_MAX_BINDINGS 6
++
++/*
++ * Option flags for kernel operations (ipt_set_info)
++ */
++#define IPSET_SRC 0x01 /* Source match/add */
++#define IPSET_DST 0x02 /* Destination match/add */
++#define IPSET_MATCH_INV 0x04 /* Inverse matching */
++
++/*
++ * Set features
++ */
++#define IPSET_TYPE_IP 0x01 /* IP address type of set */
++#define IPSET_TYPE_PORT 0x02 /* Port type of set */
++#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
++#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++
++/* Reserved keywords */
++#define IPSET_TOKEN_DEFAULT ":default:"
++#define IPSET_TOKEN_ALL ":all:"
++
++/* SO_IP_SET operation constants, and their request struct types.
++ *
++ * Operation ids:
++ * 0-99: commands with version checking
++ * 100-199: add/del/test/bind/unbind
++ * 200-299: list, save, restore
++ */
++
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
++ *
++ * Sets are identified by name.
++ */
++
++#define IP_SET_REQ_STD \
++ unsigned op; \
++ unsigned version; \
++ char name[IP_SET_MAXNAMELEN]
++
++#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
++struct ip_set_req_create {
++ IP_SET_REQ_STD;
++ char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
++struct ip_set_req_std {
++ IP_SET_REQ_STD;
++};
++
++#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
++/* Uses ip_set_req_std */
++
++#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
++/* Uses ip_set_req_create */
++
++#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
++/* Uses ip_set_req_create */
++
++union ip_set_name_index {
++ char name[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++};
++
++#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
++struct ip_set_req_get_set {
++ unsigned op;
++ unsigned version;
++ union ip_set_name_index set;
++};
++
++#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
++/* Uses ip_set_req_get_set */
++
++#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
++struct ip_set_req_version {
++ unsigned op;
++ unsigned version;
++};
++
++/* Double shot operations:
++ * add, del, test, bind and unbind.
++ *
++ * First we query the kernel to get the index and type of the target set,
++ * then issue the command. Validity of IP is checked in kernel in order
++ * to minimize sockopt operations.
++ */
++
++/* Get minimal set data for add/del/test/bind/unbind IP */
++#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
++struct ip_set_req_adt_get {
++ unsigned op;
++ unsigned version;
++ union ip_set_name_index set;
++ char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_REQ_BYINDEX \
++ unsigned op; \
++ ip_set_id_t index;
++
++struct ip_set_req_adt {
++ IP_SET_REQ_BYINDEX;
++};
++
++#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++struct ip_set_req_bind {
++ IP_SET_REQ_BYINDEX;
++ char binding[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
++/* Uses ip_set_req_bind, with type specific addage
++ * index = 0 means unbinding for all sets */
++
++#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++
++/* Multiple shot operations: list, save, restore.
++ *
++ * - check kernel version and query the max number of sets
++ * - get the basic information on all sets
++ * and size required for the next step
++ * - get actual set data: header, data, bindings
++ */
++
++/* Get max_sets and the index of a queried set
++ */
++#define IP_SET_OP_MAX_SETS 0x00000020
++struct ip_set_req_max_sets {
++ unsigned op;
++ unsigned version;
++ ip_set_id_t max_sets; /* max_sets */
++ ip_set_id_t sets; /* real number of sets */
++ union ip_set_name_index set; /* index of set if name used */
++};
++
++/* Get the id and name of the sets plus size for next step */
++#define IP_SET_OP_LIST_SIZE 0x00000201
++#define IP_SET_OP_SAVE_SIZE 0x00000202
++struct ip_set_req_setnames {
++ unsigned op;
++ ip_set_id_t index; /* set to list/save */
++ size_t size; /* size to get setdata/bindings */
++ /* followed by sets number of struct ip_set_name_list */
++};
++
++struct ip_set_name_list {
++ char name[IP_SET_MAXNAMELEN];
++ char typename[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++ ip_set_id_t id;
++};
++
++/* The actual list operation */
++#define IP_SET_OP_LIST 0x00000203
++struct ip_set_req_list {
++ IP_SET_REQ_BYINDEX;
++ /* sets number of struct ip_set_list in reply */
++};
++
++struct ip_set_list {
++ ip_set_id_t index;
++ ip_set_id_t binding;
++ u_int32_t ref;
++ size_t header_size; /* Set header data of header_size */
++ size_t members_size; /* Set members data of members_size */
++ size_t bindings_size; /* Set bindings data of bindings_size */
++};
++
++struct ip_set_hash_list {
++ ip_set_ip_t ip;
++ ip_set_id_t binding;
++};
++
++/* The save operation */
++#define IP_SET_OP_SAVE 0x00000204
++/* Uses ip_set_req_list, in the reply replaced by
++ * sets number of struct ip_set_save plus a marker
++ * ip_set_save followed by ip_set_hash_save structures.
++ */
++struct ip_set_save {
++ ip_set_id_t index;
++ ip_set_id_t binding;
++ size_t header_size; /* Set header data of header_size */
++ size_t members_size; /* Set members data of members_size */
++};
++
++/* At restoring, ip == 0 means default binding for the given set: */
++struct ip_set_hash_save {
++ ip_set_ip_t ip;
++ ip_set_id_t id;
++ ip_set_id_t binding;
++};
++
++/* The restore operation */
++#define IP_SET_OP_RESTORE 0x00000205
++/* Uses ip_set_req_setnames followed by ip_set_restore structures
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * structures.
++ */
++struct ip_set_restore {
++ char name[IP_SET_MAXNAMELEN];
++ char typename[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++ size_t header_size; /* Create data of header_size */
++ size_t members_size; /* Set members data of members_size */
++};
++
++static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
++{
++ return 4 * ((((b - a + 8) / 8) + 3) / 4);
++}
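++
++/* Example: bitmap_bytes() returns the number of bytes needed for a bitmap
++ * covering the inclusive range a..b, rounded up to a multiple of four:
++ * bitmap_bytes(0, 7) == 4 and bitmap_bytes(0, 255) == 32.
++ */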
++
++#ifdef __KERNEL__
++
++#define ip_set_printk(format, args...) \
++ do { \
++ printk("%s: %s: ", __FILE__, __FUNCTION__); \
++ printk(format "\n" , ## args); \
++ } while (0)
++
++#if defined(IP_SET_DEBUG)
++#define DP(format, args...) \
++ do { \
++ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
++ printk(format "\n" , ## args); \
++ } while (0)
++#define IP_SET_ASSERT(x) \
++ do { \
++ if (!(x)) \
++ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ } while (0)
++#else
++#define DP(format, args...)
++#define IP_SET_ASSERT(x)
++#endif
++
++struct ip_set;
++
++/*
++ * The ip_set_type definition - one per set type, e.g. "ipmap".
++ *
++ * Each individual set has a pointer, set->type, going to one
++ * of these structures. Function pointers inside the structure implement
++ * the real behaviour of the sets.
++ *
++ * Unless stated otherwise, the implementations behind the function
++ * pointers of a set_type are expected to return 0 on success and a
++ * negative errno (e.g. -EINVAL) on error.
++ */
++struct ip_set_type {
++ struct list_head list; /* next in list of set types */
++
++ /* test for IP in set (kernel: iptables -m set src|dst)
++ * return 0 if not in set, 1 if in set.
++ */
++ int (*testip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* test for IP in set (userspace: ipset -T set IP)
++ * return 0 if not in set, 1 if in set.
++ */
++ int (*testip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /*
++	 * Size of the data structure passed in when
++	 * adding/deleting/testing an entry.
++ */
++ size_t reqsize;
++
++ /* Add IP into set (userspace: ipset -A set IP)
++ * Return -EEXIST if the address is already in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address was not already in the set, 0 is returned.
++ */
++ int (*addip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
++ * Return -EEXIST if the address is already in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address was not already in the set, 0 is returned.
++ */
++ int (*addip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* remove IP from set (userspace: ipset -D set --entry x)
++ * Return -EEXIST if the address is NOT in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address really was in the set, 0 is returned.
++ */
++ int (*delip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /* remove IP from set (kernel: iptables ... -j SET --entry x)
++ * Return -EEXIST if the address is NOT in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address really was in the set, 0 is returned.
++ */
++ int (*delip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* new set creation - allocated type specific items
++ */
++ int (*create) (struct ip_set *set,
++ const void *data, size_t size);
++
++ /* retry the operation after successfully tweaking the set
++ */
++ int (*retry) (struct ip_set *set);
++
++ /* set destruction - free type specific items
++ * There is no return value.
++ * Can be called only when child sets are destroyed.
++ */
++ void (*destroy) (struct ip_set *set);
++
++ /* set flushing - reset all bits in the set, or something similar.
++ * There is no return value.
++ */
++ void (*flush) (struct ip_set *set);
++
++ /* Listing: size needed for header
++ */
++ size_t header_size;
++
++ /* Listing: Get the header
++ *
++ * Fill in the information in "data".
++	 * This function is always run after the header size has been
++	 * checked, under a writelock on the set. Therefore the length of
++	 * "data" is always correct.
++ */
++ void (*list_header) (const struct ip_set *set,
++ void *data);
++
++ /* Listing: Get the size for the set members
++ */
++ int (*list_members_size) (const struct ip_set *set);
++
++ /* Listing: Get the set members
++ *
++ * Fill in the information in "data".
++	 * This function is always run after list_members_size(), under a
++	 * writelock on the set. Therefore the length of "data" is always
++	 * correct.
++ */
++ void (*list_members) (const struct ip_set *set,
++ void *data);
++
++ char typename[IP_SET_MAXNAMELEN];
++ unsigned char features;
++ int protocol_version;
++
++ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
++ struct module *me;
++};
++
++extern int ip_set_register_set_type(struct ip_set_type *set_type);
++extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
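++
++/* Minimal registration sketch for a new set type. Illustrative only: the
++ * example_* functions and the struct ip_set_req_example* structures are
++ * placeholders, and the feature flags follow the single-IP set types
++ * bundled in this patch. The optional .retry callback is omitted.
++ *
++ *	static struct ip_set_type ip_set_example = {
++ *		.typename		= "example",
++ *		.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ *		.protocol_version	= IP_SET_PROTOCOL_VERSION,
++ *		.create			= &example_create,
++ *		.destroy		= &example_destroy,
++ *		.flush			= &example_flush,
++ *		.reqsize		= sizeof(struct ip_set_req_example),
++ *		.addip			= &example_addip,
++ *		.addip_kernel		= &example_addip_kernel,
++ *		.delip			= &example_delip,
++ *		.delip_kernel		= &example_delip_kernel,
++ *		.testip			= &example_testip,
++ *		.testip_kernel		= &example_testip_kernel,
++ *		.header_size		= sizeof(struct ip_set_req_example_create),
++ *		.list_header		= &example_list_header,
++ *		.list_members_size	= &example_list_members_size,
++ *		.list_members		= &example_list_members,
++ *		.me			= THIS_MODULE,
++ *	};
++ *
++ *	static int __init ip_set_example_init(void)
++ *	{
++ *		return ip_set_register_set_type(&ip_set_example);
++ *	}
++ *
++ *	static void __exit ip_set_example_fini(void)
++ *	{
++ *		ip_set_unregister_set_type(&ip_set_example);
++ *	}
++ *
++ *	module_init(ip_set_example_init);
++ *	module_exit(ip_set_example_fini);
++ */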
++
++/* A generic ipset */
++struct ip_set {
++ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
++ rwlock_t lock; /* lock for concurrency control */
++ ip_set_id_t id; /* set id for swapping */
++ ip_set_id_t binding; /* default binding for the set */
++ atomic_t ref; /* in kernel and in hash references */
++	struct ip_set_type *type;	/* the set type */
++	void *data;			/* set type specific data */
++};
++
++/* Structure to bind set elements to sets */
++struct ip_set_hash {
++ struct list_head list; /* list of clashing entries in hash */
++ ip_set_ip_t ip; /* ip from set */
++ ip_set_id_t id; /* set id */
++ ip_set_id_t binding; /* set we bind the element to */
++};
++
++/* register and unregister set references */
++extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
++extern void ip_set_put(ip_set_id_t id);
++
++/* API for iptables set match, and SET target */
++extern void ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern void ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_testip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
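++
++/* Illustrative sketch of how kernel code uses the API above; the real
++ * users are the "set" match and "SET" target added by this patch, and
++ * "info" with its setname/flags fields is a placeholder here:
++ *
++ *	// checkentry time: resolve and reference the set
++ *	ip_set_id_t index = ip_set_get_byname(info->setname);
++ *	if (index == IP_SET_INVALID_ID)
++ *		return 0;
++ *
++ *	// per packet: flags select src/dst and drive the binding walk
++ *	int match = ip_set_testip_kernel(index, skb, info->flags);
++ *
++ *	// destroy time: drop the reference again
++ *	ip_set_put(index);
++ */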
++
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_H*/
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iphash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,30 @@
++#ifndef __IP_SET_IPHASH_H
++#define __IP_SET_IPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iphash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iphash {
++ ip_set_ip_t *members; /* the iphash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t netmask; /* netmask */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_iphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t netmask;
++};
++
++struct ip_set_req_iphash {
++ ip_set_ip_t ip;
++};
++
++#endif /* __IP_SET_IPHASH_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,56 @@
++#ifndef __IP_SET_IPMAP_H
++#define __IP_SET_IPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipmap"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_ipmap {
++ void *members; /* the ipmap proper */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ ip_set_ip_t netmask; /* subnet netmask */
++ ip_set_ip_t sizeid; /* size of set in IPs */
++ ip_set_ip_t hosts; /* number of hosts in a subnet */
++};
++
++struct ip_set_req_ipmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++ ip_set_ip_t netmask;
++};
++
++struct ip_set_req_ipmap {
++ ip_set_ip_t ip;
++};
++
++unsigned int
++mask_to_bits(ip_set_ip_t mask)
++{
++ unsigned int bits = 32;
++ ip_set_ip_t maskaddr;
++
++ if (mask == 0xFFFFFFFF)
++ return bits;
++
++ maskaddr = 0xFFFFFFFE;
++ while (--bits >= 0 && maskaddr != mask)
++ maskaddr <<= 1;
++
++ return bits;
++}
++
++ip_set_ip_t
++range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
++{
++ ip_set_ip_t mask = 0xFFFFFFFE;
++
++ *bits = 32;
++ while (--(*bits) >= 0 && mask && (to & mask) != from)
++ mask <<= 1;
++
++ return mask;
++}
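++
++/* Example: for the range 192.168.0.0 - 192.168.0.255 (host byte order,
++ * 0xC0A80000 - 0xC0A800FF) range_to_mask() returns 0xFFFFFF00 and sets
++ * *bits to 24, and mask_to_bits(0xFFFFFF00) likewise returns 24. Both
++ * loops terminate via the mask/range test only: "bits" is unsigned, so
++ * "--bits >= 0" never becomes false and the helpers rely on being given
++ * a well-formed netmask/range.
++ */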
++
++#endif /* __IP_SET_IPMAP_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipporthash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,34 @@
++#ifndef __IP_SET_IPPORTHASH_H
++#define __IP_SET_IPPORTHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipporthash"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT (MAX_RANGE + 1)
++
++struct ip_set_ipporthash {
++ ip_set_ip_t *members; /* the ipporthash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipporthash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipporthash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_IPPORTHASH_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptree.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREE_H
++#define __IP_SET_IPTREE_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptree"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iptreed {
++ unsigned long expires[256]; /* x.x.x.ADDR */
++};
++
++struct ip_set_iptreec {
++ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
++};
++
++struct ip_set_iptreeb {
++ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
++};
++
++struct ip_set_iptree {
++ unsigned int timeout;
++ unsigned int gc_interval;
++#ifdef __KERNEL__
++ uint32_t elements; /* number of elements */
++ struct timer_list gc;
++ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
++#endif
++};
++
++struct ip_set_req_iptree_create {
++ unsigned int timeout;
++};
++
++struct ip_set_req_iptree {
++ ip_set_ip_t ip;
++ unsigned int timeout;
++};
++
++#endif /* __IP_SET_IPTREE_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptreemap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptreemap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREEMAP_H
++#define __IP_SET_IPTREEMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptreemap"
++
++#ifdef __KERNEL__
++struct ip_set_iptreemap_d {
++ unsigned char bitmap[32]; /* x.x.x.y */
++};
++
++struct ip_set_iptreemap_c {
++ struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
++};
++
++struct ip_set_iptreemap_b {
++ struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
++ unsigned char dirty[32];
++};
++#endif
++
++struct ip_set_iptreemap {
++ unsigned int gc_interval;
++#ifdef __KERNEL__
++ struct timer_list gc;
++ struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
++#endif
++};
++
++struct ip_set_req_iptreemap_create {
++ unsigned int gc_interval;
++};
++
++struct ip_set_req_iptreemap {
++ ip_set_ip_t start;
++ ip_set_ip_t end;
++};
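++
++/* A single request covers the whole inclusive range start..end in host
++ * byte order (following the convention in ip_set.h); adding or deleting
++ * one address is simply start == end.
++ */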
++
++#endif /* __IP_SET_IPTREEMAP_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_jhash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,148 @@
++#ifndef _LINUX_IPSET_JHASH_H
++#define _LINUX_IPSET_JHASH_H
++
++/* This is a copy of linux/jhash.h but the types u32/u8 are changed
++ * to __u32/__u8 so that the header file can be included into
++ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
++ */
++
++/* jhash.h: Jenkins hash support.
++ *
++ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ *
++ * http://burtleburtle.net/bob/hash/
++ *
++ * These are the credits from Bob's sources:
++ *
++ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
++ * hash(), hash2(), hash3, and mix() are externally useful functions.
++ * Routines to test the hash are included if SELF_TEST is defined.
++ * You can use this free for any purpose. It has no warranty.
++ *
++ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ *
++ * I've modified Bob's hash to be useful in the Linux kernel, and
++ * any bugs present are surely my fault. -DaveM
++ */
++
++/* NOTE: Arguments are modified. */
++#define __jhash_mix(a, b, c) \
++{ \
++ a -= b; a -= c; a ^= (c>>13); \
++ b -= c; b -= a; b ^= (a<<8); \
++ c -= a; c -= b; c ^= (b>>13); \
++ a -= b; a -= c; a ^= (c>>12); \
++ b -= c; b -= a; b ^= (a<<16); \
++ c -= a; c -= b; c ^= (b>>5); \
++ a -= b; a -= c; a ^= (c>>3); \
++ b -= c; b -= a; b ^= (a<<10); \
++ c -= a; c -= b; c ^= (b>>15); \
++}
++
++/* The golden ratio: an arbitrary value */
++#define JHASH_GOLDEN_RATIO 0x9e3779b9
++
++/* The most generic version, hashes an arbitrary sequence
++ * of bytes. No alignment or length assumptions are made about
++ * the input key.
++ */
++static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++{
++ __u32 a, b, c, len;
++ __u8 *k = key;
++
++ len = length;
++ a = b = JHASH_GOLDEN_RATIO;
++ c = initval;
++
++ while (len >= 12) {
++ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
++ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
++ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
++
++ __jhash_mix(a,b,c);
++
++ k += 12;
++ len -= 12;
++ }
++
++ c += length;
++ switch (len) {
++ case 11: c += ((__u32)k[10]<<24);
++ case 10: c += ((__u32)k[9]<<16);
++ case 9 : c += ((__u32)k[8]<<8);
++ case 8 : b += ((__u32)k[7]<<24);
++ case 7 : b += ((__u32)k[6]<<16);
++ case 6 : b += ((__u32)k[5]<<8);
++ case 5 : b += k[4];
++ case 4 : a += ((__u32)k[3]<<24);
++ case 3 : a += ((__u32)k[2]<<16);
++ case 2 : a += ((__u32)k[1]<<8);
++ case 1 : a += k[0];
++ };
++
++ __jhash_mix(a,b,c);
++
++ return c;
++}
++
++/* A special optimized version that handles 1 or more __u32s.
++ * The length parameter here is the number of __u32s in the key.
++ */
++static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++{
++ __u32 a, b, c, len;
++
++ a = b = JHASH_GOLDEN_RATIO;
++ c = initval;
++ len = length;
++
++ while (len >= 3) {
++ a += k[0];
++ b += k[1];
++ c += k[2];
++ __jhash_mix(a, b, c);
++ k += 3; len -= 3;
++ }
++
++ c += length * 4;
++
++ switch (len) {
++ case 2 : b += k[1];
++ case 1 : a += k[0];
++ };
++
++ __jhash_mix(a,b,c);
++
++ return c;
++}
++
++
++/* Special ultra-optimized versions that know they are hashing exactly
++ * 3, 2 or 1 word(s).
++ *
++ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
++ * done at the end is not done here.
++ */
++static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++{
++ a += JHASH_GOLDEN_RATIO;
++ b += JHASH_GOLDEN_RATIO;
++ c += initval;
++
++ __jhash_mix(a, b, c);
++
++ return c;
++}
++
++static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++{
++ return jhash_3words(a, b, 0, initval);
++}
++
++static inline __u32 jhash_1word(__u32 a, __u32 initval)
++{
++ return jhash_3words(a, 0, 0, initval);
++}
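++
++/* Typical use in the hash based set types of this patch: map an IP to a
++ * bucket with one 32 bit init value per probe. The final modulo by the
++ * hash size is the usual step and is assumed here, not mandated by this
++ * header:
++ *
++ *	u_int32_t bucket = jhash_1word(ip, initval) % hashsize;
++ */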
++
++#endif /* _LINUX_IPSET_JHASH_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_macipmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,38 @@
++#ifndef __IP_SET_MACIPMAP_H
++#define __IP_SET_MACIPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "macipmap"
++#define MAX_RANGE 0x0000FFFF
++
++/* general flags */
++#define IPSET_MACIP_MATCHUNSET 1
++
++/* per ip flags */
++#define IPSET_MACIP_ISSET 1
++
++struct ip_set_macipmap {
++ void *members; /* the macipmap proper */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t flags;
++};
++
++struct ip_set_req_macipmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++ u_int32_t flags;
++};
++
++struct ip_set_req_macipmap {
++ ip_set_ip_t ip;
++ unsigned char ethernet[ETH_ALEN];
++};
++
++struct ip_set_macip {
++ unsigned short flags;
++ unsigned char ethernet[ETH_ALEN];
++};
++
++#endif /* __IP_SET_MACIPMAP_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_malloc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,116 @@
++#ifndef _IP_SET_MALLOC_H
++#define _IP_SET_MALLOC_H
++
++#ifdef __KERNEL__
++
++/* Memory allocation and deallocation */
++static size_t max_malloc_size = 0;
++
++static inline void init_max_malloc_size(void)
++{
++#define CACHE(x) max_malloc_size = x;
++#include <linux/kmalloc_sizes.h>
++#undef CACHE
++}
++
++static inline void * ip_set_malloc(size_t bytes)
++{
++ if (bytes > max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ if (bytes > max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
++struct harray {
++ size_t max_elements;
++ void *arrays[0];
++};
++
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, int flags)
++{
++ struct harray *harray;
++ size_t max_elements, size, i, j;
++
++ if (!max_malloc_size)
++ init_max_malloc_size();
++
++ if (typesize > max_malloc_size)
++ return NULL;
++
++ max_elements = max_malloc_size/typesize;
++ size = hashsize/max_elements;
++ if (hashsize % max_elements)
++ size++;
++
++ /* Last pointer signals end of arrays */
++ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
++ flags);
++
++ if (!harray)
++ return NULL;
++
++ for (i = 0; i < size - 1; i++) {
++ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
++ if (!harray->arrays[i])
++ goto undo;
++ memset(harray->arrays[i], 0, max_elements * typesize);
++ }
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ flags);
++ if (!harray->arrays[i])
++ goto undo;
++ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
++
++ harray->max_elements = max_elements;
++ harray->arrays[size] = NULL;
++
++ return (void *)harray;
++
++ undo:
++ for (j = 0; j < i; j++) {
++ kfree(harray->arrays[j]);
++ }
++ kfree(harray);
++ return NULL;
++}
++
++static inline void harray_free(void *h)
++{
++ struct harray *harray = (struct harray *) h;
++ size_t i;
++
++ for (i = 0; harray->arrays[i] != NULL; i++)
++ kfree(harray->arrays[i]);
++ kfree(harray);
++}
++
++static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
++{
++ struct harray *harray = (struct harray *) h;
++ size_t i;
++
++ for (i = 0; harray->arrays[i+1] != NULL; i++)
++ memset(harray->arrays[i], 0, harray->max_elements * typesize);
++ memset(harray->arrays[i], 0,
++ (hashsize - i * harray->max_elements) * typesize);
++}
++
++#define HARRAY_ELEM(h, type, which) \
++({ \
++ struct harray *__h = (struct harray *)(h); \
++ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
++ + (which)%(__h)->max_elements); \
++})
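++
++/* Usage sketch (variable names are placeholders): a hash of "hashsize"
++ * slots is allocated in chunks no larger than the biggest kmalloc size,
++ * and a slot is always reached through HARRAY_ELEM:
++ *
++ *	ip_set_ip_t *elem;
++ *	void *members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ *
++ *	if (!members)
++ *		return -ENOMEM;
++ *	elem = HARRAY_ELEM(members, ip_set_ip_t *, id);
++ *	*elem = ip;
++ *	...
++ *	harray_free(members);
++ */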
++
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_MALLOC_H*/
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_nethash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef __IP_SET_NETHASH_H
++#define __IP_SET_NETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "nethash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_nethash {
++ ip_set_ip_t *members; /* the nethash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ unsigned char cidr[30]; /* CIDR sizes */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_nethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++};
++
++struct ip_set_req_nethash {
++ ip_set_ip_t ip;
++ unsigned char cidr;
++};
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
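++
++/* Example: pack() folds the prefix length into the address itself so that
++ * networks of different sizes can live in one hash:
++ * pack(0xC0A80000, 24) == 0xC0A800FF, i.e. 192.168.0.0/24 is stored as
++ * 192.168.0.255 in host byte order.
++ */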
++
++#endif /* __IP_SET_NETHASH_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_portmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,25 @@
++#ifndef __IP_SET_PORTMAP_H
++#define __IP_SET_PORTMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "portmap"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT (MAX_RANGE + 1)
++
++struct ip_set_portmap {
++ void *members; /* the portmap proper */
++ ip_set_ip_t first_port; /* host byte order, included in range */
++ ip_set_ip_t last_port; /* host byte order, included in range */
++};
++
++struct ip_set_req_portmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_portmap {
++ ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_PORTMAP_H */
+diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h
+--- linux-2.6.23/include/linux/netfilter_ipv4/ipt_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,21 @@
++#ifndef _IPT_SET_H
++#define _IPT_SET_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++struct ipt_set_info {
++ ip_set_id_t index;
++ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
++};
++
++/* match info */
++struct ipt_set_info_match {
++ struct ipt_set_info match_set;
++};
++
++struct ipt_set_info_target {
++ struct ipt_set_info add_set;
++ struct ipt_set_info del_set;
++};
++
++#endif /*_IPT_SET_H*/
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,2003 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module for IP set management */
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kmod.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <asm/semaphore.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/ip_set.h>
++
++static struct list_head set_type_list; /* all registered sets */
++static struct ip_set **ip_set_list; /* all individual sets */
++static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
++static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
++static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
++static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
++static struct list_head *ip_set_hash; /* hash of bindings */
++static unsigned int ip_set_hash_random; /* random seed */
++
++/*
++ * Sets are identified either by the index in ip_set_list or by id.
++ * The id never changes and is used to find a key in the hash.
++ * The index may change by swapping and is used at all other places
++ * (set/SET netfilter modules, binding value, etc.)
++ *
++ * Userspace requests are serialized by ip_set_app_mutex and sets can
++ * be deleted only from userspace. Therefore ip_set_list locking
++ * must obey the following rules:
++ *
++ * - kernel requests: read and write locking mandatory
++ * - user requests: read locking optional, write locking mandatory
++ */
++
++static inline void
++__ip_set_get(ip_set_id_t index)
++{
++ atomic_inc(&ip_set_list[index]->ref);
++}
++
++static inline void
++__ip_set_put(ip_set_id_t index)
++{
++ atomic_dec(&ip_set_list[index]->ref);
++}
++
++/*
++ * Binding routines
++ */
++
++static inline struct ip_set_hash *
++__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
++{
++ struct ip_set_hash *set_hash;
++
++ list_for_each_entry(set_hash, &ip_set_hash[key], list)
++ if (set_hash->id == id && set_hash->ip == ip)
++ return set_hash;
++
++ return NULL;
++}
++
++static ip_set_id_t
++ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++
++ ASSERT_READ_LOCK(&ip_set_lock);
++ IP_SET_ASSERT(ip_set_list[id]);
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
++ set_hash = __ip_set_find(key, id, ip);
++
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip),
++ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
++}
++
++static inline void
++__set_hash_del(struct ip_set_hash *set_hash)
++{
++ ASSERT_WRITE_LOCK(&ip_set_lock);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++
++ __ip_set_put(set_hash->binding);
++ list_del(&set_hash->list);
++ kfree(set_hash);
++}
++
++static int
++ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++
++ IP_SET_ASSERT(ip_set_list[id]);
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ write_lock_bh(&ip_set_lock);
++ set_hash = __ip_set_find(key, id, ip);
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip),
++ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++ if (set_hash != NULL)
++ __set_hash_del(set_hash);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++}
++
++static int
++ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++ int ret = 0;
++
++ IP_SET_ASSERT(ip_set_list[id]);
++ IP_SET_ASSERT(ip_set_list[binding]);
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip), ip_set_list[binding]->name);
++ write_lock_bh(&ip_set_lock);
++ set_hash = __ip_set_find(key, id, ip);
++ if (!set_hash) {
++ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
++ if (!set_hash) {
++ ret = -ENOMEM;
++ goto unlock;
++ }
++ INIT_LIST_HEAD(&set_hash->list);
++ set_hash->id = id;
++ set_hash->ip = ip;
++ list_add(&set_hash->list, &ip_set_hash[key]);
++ } else {
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ DP("overwrite binding: %s",
++ ip_set_list[set_hash->binding]->name);
++ __ip_set_put(set_hash->binding);
++ }
++ set_hash->binding = binding;
++ __ip_set_get(set_hash->binding);
++ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
++ key, id, ip_set_list[id]->name,
++ HIPQUAD(ip), binding, ip_set_list[binding]->name);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return ret;
++}
++
++#define FOREACH_HASH_DO(fn, args...) \
++({ \
++ ip_set_id_t __key; \
++ struct ip_set_hash *__set_hash; \
++ \
++ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
++ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
++ fn(__set_hash , ## args); \
++ } \
++})
++
++#define FOREACH_HASH_RW_DO(fn, args...) \
++({ \
++ ip_set_id_t __key; \
++ struct ip_set_hash *__set_hash, *__n; \
++ \
++ ASSERT_WRITE_LOCK(&ip_set_lock); \
++ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
++ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
++ fn(__set_hash , ## args); \
++ } \
++})
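++
++/* The iterator macros expand "fn" for every binding in the hash, e.g.
++ * (as done further below when listing a set):
++ *
++ *	FOREACH_HASH_DO(__set_hash_bindings_size_list, set->id, &size);
++ *
++ * The _RW variant walks the lists with list_for_each_entry_safe and may
++ * therefore delete entries, but requires ip_set_lock held for writing.
++ */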
++
++/* Add, del and test set entries from kernel */
++
++#define follow_bindings(index, set, ip) \
++((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
++ || (index = (set)->binding) != IP_SET_INVALID_ID)
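++
++/* follow_bindings() first looks up a binding for the matched IP in the
++ * binding hash and falls back to the default binding of the set; as a
++ * side effect it updates "index", so the loops below walk the chain of
++ * bound sets until no further binding exists.
++ */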
++
++int
++ip_set_testip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ read_lock_bh(&set->lock);
++ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
++ read_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while (res > 0
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++
++ return res;
++}
++
++void
++ip_set_addip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ retry:
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ write_lock_bh(&set->lock);
++ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
++ write_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while ((res == 0 || res == -EEXIST)
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++
++ if (res == -EAGAIN
++ && set->type->retry
++ && (res = set->type->retry(set)) == 0)
++ goto retry;
++}
++
++void
++ip_set_delip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ write_lock_bh(&set->lock);
++ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
++ write_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while ((res == 0 || res == -EEXIST)
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++}
++
++/* Register and deregister settype */
++
++static inline struct ip_set_type *
++find_set_type(const char *name)
++{
++ struct ip_set_type *set_type;
++
++ list_for_each_entry(set_type, &set_type_list, list)
++ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
++ return set_type;
++ return NULL;
++}
++
++int
++ip_set_register_set_type(struct ip_set_type *set_type)
++{
++ int ret = 0;
++
++ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
++ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
++ set_type->typename,
++ set_type->protocol_version,
++ IP_SET_PROTOCOL_VERSION);
++ return -EINVAL;
++ }
++
++ write_lock_bh(&ip_set_lock);
++ if (find_set_type(set_type->typename)) {
++ /* Duplicate! */
++ ip_set_printk("'%s' already registered!",
++ set_type->typename);
++ ret = -EINVAL;
++ goto unlock;
++ }
++ if (!try_module_get(THIS_MODULE)) {
++ ret = -EFAULT;
++ goto unlock;
++ }
++ list_add(&set_type->list, &set_type_list);
++ DP("'%s' registered.", set_type->typename);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return ret;
++}
++
++void
++ip_set_unregister_set_type(struct ip_set_type *set_type)
++{
++ write_lock_bh(&ip_set_lock);
++ if (!find_set_type(set_type->typename)) {
++ ip_set_printk("'%s' not registered?",
++ set_type->typename);
++ goto unlock;
++ }
++ list_del(&set_type->list);
++ module_put(THIS_MODULE);
++ DP("'%s' unregistered.", set_type->typename);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++
++}
++
++/*
++ * Userspace routines
++ */
++
++/*
++ * Find set by name, reference it once. The reference makes sure the
++ * thing pointed to, does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byname(const char *name)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ down(&ip_set_app_mutex);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strcmp(ip_set_list[i]->name, name) == 0) {
++ __ip_set_get(i);
++ index = i;
++ break;
++ }
++ }
++ up(&ip_set_app_mutex);
++ return index;
++}
++
++/*
++ * Find set by index, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byindex(ip_set_id_t index)
++{
++ down(&ip_set_app_mutex);
++
++	if (index >= ip_set_max) {
++		up(&ip_set_app_mutex);
++		return IP_SET_INVALID_ID;
++	}
++
++ if (ip_set_list[index])
++ __ip_set_get(index);
++ else
++ index = IP_SET_INVALID_ID;
++
++ up(&ip_set_app_mutex);
++ return index;
++}
++
++/*
++ * If the given index points to a valid set, decrement its
++ * reference count by 1. The caller shall not assume the index
++ * is still valid after calling this function.
++ */
++void ip_set_put(ip_set_id_t index)
++{
++ down(&ip_set_app_mutex);
++ if (ip_set_list[index])
++ __ip_set_put(index);
++ up(&ip_set_app_mutex);
++}
++
++/* Find a set by name or index */
++static ip_set_id_t
++ip_set_find_byname(const char *name)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strcmp(ip_set_list[i]->name, name) == 0) {
++ index = i;
++ break;
++ }
++ }
++ return index;
++}
++
++static ip_set_id_t
++ip_set_find_byindex(ip_set_id_t index)
++{
++ if (index >= ip_set_max || ip_set_list[index] == NULL)
++ index = IP_SET_INVALID_ID;
++
++ return index;
++}
++
++/*
++ * Add, del, test, bind and unbind
++ */
++
++static inline int
++__ip_set_testip(struct ip_set *set,
++ const void *data,
++ size_t size,
++ ip_set_ip_t *ip)
++{
++ int res;
++
++ read_lock_bh(&set->lock);
++ res = set->type->testip(set, data, size, ip);
++ read_unlock_bh(&set->lock);
++
++ return res;
++}
++
++static int
++__ip_set_addip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ do {
++ write_lock_bh(&set->lock);
++ res = set->type->addip(set, data, size, &ip);
++ write_unlock_bh(&set->lock);
++ } while (res == -EAGAIN
++ && set->type->retry
++ && (res = set->type->retry(set)) == 0);
++
++ return res;
++}
++
++static int
++ip_set_addip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++
++ return __ip_set_addip(index,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt));
++}
++
++static int
++ip_set_delip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ write_lock_bh(&set->lock);
++ res = set->type->delip(set,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt),
++ &ip);
++ write_unlock_bh(&set->lock);
++
++ return res;
++}
++
++static int
++ip_set_testip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt),
++ &ip);
++
++ return (res > 0 ? -EEXIST : res);
++}
++
++static int
++ip_set_bindip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_req_bind *req_bind;
++ ip_set_id_t binding;
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of a set */
++ char *binding_name;
++
++ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++ return -EINVAL;
++
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ binding = ip_set_find_byname(binding_name);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ write_lock_bh(&ip_set_lock);
++ /* Sets as binding values are referenced */
++ if (set->binding != IP_SET_INVALID_ID)
++ __ip_set_put(set->binding);
++ set->binding = binding;
++ __ip_set_get(set->binding);
++ write_unlock_bh(&ip_set_lock);
++
++ return 0;
++ }
++ binding = ip_set_find_byname(req_bind->binding);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++ DP("set %s, ip: %u.%u.%u.%u, binding %s",
++ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++
++ if (res >= 0)
++ res = ip_set_hash_add(set->id, ip, binding);
++
++ return res;
++}
++
++#define FOREACH_SET_DO(fn, args...) \
++({ \
++ ip_set_id_t __i; \
++ struct ip_set *__set; \
++ \
++ for (__i = 0; __i < ip_set_max; __i++) { \
++ __set = ip_set_list[__i]; \
++ if (__set != NULL) \
++ fn(__set , ##args); \
++ } \
++})
++
++static inline void
++__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
++{
++ if (set_hash->id == id)
++ __set_hash_del(set_hash);
++}
++
++static inline void
++__unbind_default(struct ip_set *set)
++{
++ if (set->binding != IP_SET_INVALID_ID) {
++ /* Sets as binding values are referenced */
++ __ip_set_put(set->binding);
++ set->binding = IP_SET_INVALID_ID;
++ }
++}
++
++static int
++ip_set_unbindip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set;
++ struct ip_set_req_bind *req_bind;
++ ip_set_ip_t ip;
++ int res;
++
++ DP("");
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ DP("%u %s", index, req_bind->binding);
++ if (index == IP_SET_INVALID_ID) {
++ /* unbind :all: */
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of sets */
++ write_lock_bh(&ip_set_lock);
++ FOREACH_SET_DO(__unbind_default);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all bindings of all sets*/
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ }
++ DP("unreachable reached!");
++ return -EINVAL;
++ }
++
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of set */
++ ip_set_id_t binding = ip_set_find_byindex(set->binding);
++
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ write_lock_bh(&ip_set_lock);
++ /* Sets in hash values are referenced */
++ __ip_set_put(set->binding);
++ set->binding = IP_SET_INVALID_ID;
++ write_unlock_bh(&ip_set_lock);
++
++ return 0;
++ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all bindings */
++
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ }
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++
++ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
++ if (res >= 0)
++ res = ip_set_hash_del(set->id, ip);
++
++ return res;
++}
++
++static int
++ip_set_testbind(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_req_bind *req_bind;
++ ip_set_id_t binding;
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of set */
++ char *binding_name;
++
++ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++ return -EINVAL;
++
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ binding = ip_set_find_byname(binding_name);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = (set->binding == binding) ? -EEXIST : 0;
++
++ return res;
++ }
++ binding = ip_set_find_byname(req_bind->binding);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++ DP("set %s, ip: %u.%u.%u.%u, binding %s",
++ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++
++ if (res >= 0)
++ res = (ip_set_find_in_hash(set->id, ip) == binding)
++ ? -EEXIST : 0;
++
++ return res;
++}
++
++static struct ip_set_type *
++find_set_type_rlock(const char *typename)
++{
++ struct ip_set_type *type;
++
++ read_lock_bh(&ip_set_lock);
++ type = find_set_type(typename);
++ if (type == NULL)
++ read_unlock_bh(&ip_set_lock);
++
++ return type;
++}
++
++static int
++find_free_id(const char *name,
++ ip_set_id_t *index,
++ ip_set_id_t *id)
++{
++ ip_set_id_t i;
++
++ *id = IP_SET_INVALID_ID;
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] == NULL) {
++ if (*id == IP_SET_INVALID_ID)
++ *id = *index = i;
++ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ /* Name clash */
++ return -EEXIST;
++ }
++ if (*id == IP_SET_INVALID_ID)
++ /* No free slot remained */
++ return -ERANGE;
++ /* Check that index is usable as id (swapping) */
++ check:
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && ip_set_list[i]->id == *id) {
++ *id = i;
++ goto check;
++ }
++ }
++ return 0;
++}
++
++/*
++ * Create a set
++ */
++static int
++ip_set_create(const char *name,
++ const char *typename,
++ ip_set_id_t restore,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set;
++ ip_set_id_t index = 0, id;
++ int res = 0;
++
++ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++ /*
++ * First, and without any locks, allocate and initialize
++ * a normal base set structure.
++ */
++ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
++ if (!set)
++ return -ENOMEM;
++ set->lock = RW_LOCK_UNLOCKED;
++ strncpy(set->name, name, IP_SET_MAXNAMELEN);
++ set->binding = IP_SET_INVALID_ID;
++ atomic_set(&set->ref, 0);
++
++ /*
++ * Next, take the &ip_set_lock, check that we know the type,
++ * and take a reference on the type, to make sure it
++ * stays available while constructing our new set.
++ *
++ * After referencing the type, we drop the &ip_set_lock,
++ * and let the new set construction run without locks.
++ */
++ set->type = find_set_type_rlock(typename);
++ if (set->type == NULL) {
++ /* Try loading the module */
++ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
++ strcpy(modulename, "ip_set_");
++ strcat(modulename, typename);
++ DP("try to load %s", modulename);
++ request_module(modulename);
++ set->type = find_set_type_rlock(typename);
++ }
++ if (set->type == NULL) {
++ ip_set_printk("no set type '%s', set '%s' not created",
++ typename, name);
++ res = -ENOENT;
++ goto out;
++ }
++ if (!try_module_get(set->type->me)) {
++ read_unlock_bh(&ip_set_lock);
++ res = -EFAULT;
++ goto out;
++ }
++ read_unlock_bh(&ip_set_lock);
++
++ /*
++ * Without holding any locks, create private part.
++ */
++ res = set->type->create(set, data, size);
++ if (res != 0)
++ goto put_out;
++
++ /* BTW, res==0 here. */
++
++ /*
++ * Here, we have a valid, constructed set. &ip_set_lock again,
++ * find free id/index and check that it is not already in
++ * ip_set_list.
++ */
++ write_lock_bh(&ip_set_lock);
++ if ((res = find_free_id(set->name, &index, &id)) != 0) {
++ DP("no free id!");
++ goto cleanup;
++ }
++
++ /* Make sure restore gets the same index */
++ if (restore != IP_SET_INVALID_ID && index != restore) {
++ DP("Can't restore, sets are screwed up");
++ res = -ERANGE;
++ goto cleanup;
++ }
++
++ /*
++ * Finally! Add our shiny new set to the list, and be done.
++ */
++ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
++ set->id = id;
++ ip_set_list[index] = set;
++ write_unlock_bh(&ip_set_lock);
++ return res;
++
++ cleanup:
++ write_unlock_bh(&ip_set_lock);
++ set->type->destroy(set);
++ put_out:
++ module_put(set->type->me);
++ out:
++ kfree(set);
++ return res;
++}
++
++/*
++ * Destroy a given existing set
++ */
++static void
++ip_set_destroy_set(ip_set_id_t index)
++{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
++ DP("set: %s", set->name);
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++ if (set->binding != IP_SET_INVALID_ID)
++ __ip_set_put(set->binding);
++ ip_set_list[index] = NULL;
++ write_unlock_bh(&ip_set_lock);
++
++ /* Must call it without holding any lock */
++ set->type->destroy(set);
++ module_put(set->type->me);
++ kfree(set);
++}
++
++/*
++ * Destroy a set - or all sets
++ * Sets must not be referenced/used.
++ */
++static int
++ip_set_destroy(ip_set_id_t index)
++{
++ ip_set_id_t i;
++
++ /* ref modification always protected by the mutex */
++ if (index != IP_SET_INVALID_ID) {
++ if (atomic_read(&ip_set_list[index]->ref))
++ return -EBUSY;
++ ip_set_destroy_set(index);
++ } else {
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && (atomic_read(&ip_set_list[i]->ref)))
++ return -EBUSY;
++ }
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL)
++ ip_set_destroy_set(i);
++ }
++ }
++ return 0;
++}
++
++static void
++ip_set_flush_set(struct ip_set *set)
++{
++ DP("set: %s %u", set->name, set->id);
++
++ write_lock_bh(&set->lock);
++ set->type->flush(set);
++ write_unlock_bh(&set->lock);
++}
++
++/*
++ * Flush data in a set - or in all sets
++ */
++static int
++ip_set_flush(ip_set_id_t index)
++{
++ if (index != IP_SET_INVALID_ID) {
++ IP_SET_ASSERT(ip_set_list[index]);
++ ip_set_flush_set(ip_set_list[index]);
++ } else
++ FOREACH_SET_DO(ip_set_flush_set);
++
++ return 0;
++}
++
++/* Rename a set */
++static int
++ip_set_rename(ip_set_id_t index, const char *name)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_id_t i;
++ int res = 0;
++
++ DP("set: %s to %s", set->name, name);
++ write_lock_bh(&ip_set_lock);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strncmp(ip_set_list[i]->name,
++ name,
++ IP_SET_MAXNAMELEN - 1) == 0) {
++ res = -EEXIST;
++ goto unlock;
++ }
++ }
++ strncpy(set->name, name, IP_SET_MAXNAMELEN);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return res;
++}
++
++/*
++ * Swap two sets so that name/index points to the other.
++ * References are also swapped.
++ */
++static int
++ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
++{
++ struct ip_set *from = ip_set_list[from_index];
++ struct ip_set *to = ip_set_list[to_index];
++ char from_name[IP_SET_MAXNAMELEN];
++ u_int32_t from_ref;
++
++ DP("set: %s to %s", from->name, to->name);
++	/* Features must not change. Artificial restriction. */
++ if (from->type->features != to->type->features)
++ return -ENOEXEC;
++
++ /* No magic here: ref munging protected by the mutex */
++ write_lock_bh(&ip_set_lock);
++ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
++ from_ref = atomic_read(&from->ref);
++
++ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
++ atomic_set(&from->ref, atomic_read(&to->ref));
++ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
++ atomic_set(&to->ref, from_ref);
++
++ ip_set_list[from_index] = to;
++ ip_set_list[to_index] = from;
++
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++}
++
++/*
++ * List set data
++ */
++
++static inline void
++__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
++ ip_set_id_t id, size_t *size)
++{
++ if (set_hash->id == id)
++ *size += sizeof(struct ip_set_hash_list);
++}
++
++static inline void
++__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
++ ip_set_id_t id, size_t *size)
++{
++ if (set_hash->id == id)
++ *size += sizeof(struct ip_set_hash_save);
++}
++
++static inline void
++__set_hash_bindings(struct ip_set_hash *set_hash,
++ ip_set_id_t id, void *data, int *used)
++{
++ if (set_hash->id == id) {
++ struct ip_set_hash_list *hash_list =
++ (struct ip_set_hash_list *)(data + *used);
++
++ hash_list->ip = set_hash->ip;
++ hash_list->binding = set_hash->binding;
++ *used += sizeof(struct ip_set_hash_list);
++ }
++}
++
++static int ip_set_list_set(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_list *set_list;
++
++ /* Pointer to our header */
++ set_list = (struct ip_set_list *) (data + *used);
++
++ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
++
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_list) > len)
++ goto not_enough_mem;
++ *used += sizeof(struct ip_set_list);
++
++ read_lock_bh(&set->lock);
++ /* Get and ensure set specific header size */
++ set_list->header_size = set->type->header_size;
++ if (*used + set_list->header_size > len)
++ goto unlock_set;
++
++ /* Fill in the header */
++ set_list->index = index;
++ set_list->binding = set->binding;
++ set_list->ref = atomic_read(&set->ref);
++
++	/* Fill in set specific header data */
++ set->type->list_header(set, data + *used);
++ *used += set_list->header_size;
++
++ /* Get and ensure set specific members size */
++ set_list->members_size = set->type->list_members_size(set);
++ if (*used + set_list->members_size > len)
++ goto unlock_set;
++
++	/* Fill in set specific members data */
++ set->type->list_members(set, data + *used);
++ *used += set_list->members_size;
++ read_unlock_bh(&set->lock);
++
++ /* Bindings */
++
++ /* Get and ensure set specific bindings size */
++ set_list->bindings_size = 0;
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ set->id, &set_list->bindings_size);
++ if (*used + set_list->bindings_size > len)
++ goto not_enough_mem;
++
++	/* Fill in set specific bindings data */
++ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
++
++ return 0;
++
++ unlock_set:
++ read_unlock_bh(&set->lock);
++ not_enough_mem:
++ DP("not enough mem, try again");
++ return -EAGAIN;
++}
++
++/*
++ * Save sets
++ */
++static int ip_set_save_set(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ struct ip_set *set;
++ struct ip_set_save *set_save;
++
++ /* Pointer to our header */
++ set_save = (struct ip_set_save *) (data + *used);
++
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_save) > len)
++ goto not_enough_mem;
++ *used += sizeof(struct ip_set_save);
++
++ set = ip_set_list[index];
++ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ data, data + *used);
++
++ read_lock_bh(&set->lock);
++ /* Get and ensure set specific header size */
++ set_save->header_size = set->type->header_size;
++ if (*used + set_save->header_size > len)
++ goto unlock_set;
++
++ /* Fill in the header */
++ set_save->index = index;
++ set_save->binding = set->binding;
++
++	/* Fill in set specific header data */
++ set->type->list_header(set, data + *used);
++ *used += set_save->header_size;
++
++ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
++ set_save->header_size, data, data + *used);
++ /* Get and ensure set specific members size */
++ set_save->members_size = set->type->list_members_size(set);
++ if (*used + set_save->members_size > len)
++ goto unlock_set;
++
++	/* Fill in set specific members data */
++ set->type->list_members(set, data + *used);
++ *used += set_save->members_size;
++ read_unlock_bh(&set->lock);
++ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
++ set_save->members_size, data, data + *used);
++ return 0;
++
++ unlock_set:
++ read_unlock_bh(&set->lock);
++ not_enough_mem:
++ DP("not enough mem, try again");
++ return -EAGAIN;
++}
++
++static inline void
++__set_hash_save_bindings(struct ip_set_hash *set_hash,
++ ip_set_id_t id,
++ void *data,
++ int *used,
++ int len,
++ int *res)
++{
++ if (*res == 0
++ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
++ struct ip_set_hash_save *hash_save =
++ (struct ip_set_hash_save *)(data + *used);
++ /* Ensure bindings size */
++ if (*used + sizeof(struct ip_set_hash_save) > len) {
++ *res = -ENOMEM;
++ return;
++ }
++ hash_save->id = set_hash->id;
++ hash_save->ip = set_hash->ip;
++ hash_save->binding = set_hash->binding;
++ *used += sizeof(struct ip_set_hash_save);
++ }
++}
++
++static int ip_set_save_bindings(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ int res = 0;
++ struct ip_set_save *set_save;
++
++ DP("used %u, len %u", *used, len);
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_save) > len)
++ return -ENOMEM;
++
++ /* Marker */
++ set_save = (struct ip_set_save *) (data + *used);
++ set_save->index = IP_SET_INVALID_ID;
++ set_save->header_size = 0;
++ set_save->members_size = 0;
++ *used += sizeof(struct ip_set_save);
++
++ DP("marker added used %u, len %u", *used, len);
++ /* Fill in bindings data */
++ if (index != IP_SET_INVALID_ID)
++ /* Sets are identified by id in hash */
++ index = ip_set_list[index]->id;
++ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
++
++ return res;
++}
++
++/*
++ * Restore sets
++ */
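++/* Format of the restore blob: one struct ip_set_restore per set, each
++ * followed by its type-specific header and member data; a marker record
++ * with index == IP_SET_INVALID_ID; then struct ip_set_hash_save binding
++ * records until 'len' is consumed. Returns 0 on success, otherwise the
++ * number of the offending entry. */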
++static int ip_set_restore(void *data,
++ int len)
++{
++ int res = 0;
++ int line = 0, used = 0, members_size;
++ struct ip_set *set;
++ struct ip_set_hash_save *hash_save;
++ struct ip_set_restore *set_restore;
++ ip_set_id_t index;
++
++ /* Loop to restore sets */
++ while (1) {
++ line++;
++
++ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++ /* Get and ensure header size */
++ if (used + sizeof(struct ip_set_restore) > len)
++ return line;
++ set_restore = (struct ip_set_restore *) (data + used);
++ used += sizeof(struct ip_set_restore);
++
++ /* Ensure data size */
++ if (used
++ + set_restore->header_size
++ + set_restore->members_size > len)
++ return line;
++
++ /* Check marker */
++ if (set_restore->index == IP_SET_INVALID_ID) {
++ line--;
++ goto bindings;
++ }
++
++ /* Try to create the set */
++ DP("restore %s %s", set_restore->name, set_restore->typename);
++ res = ip_set_create(set_restore->name,
++ set_restore->typename,
++ set_restore->index,
++ data + used,
++ set_restore->header_size);
++
++ if (res != 0)
++ return line;
++ used += set_restore->header_size;
++
++ index = ip_set_find_byindex(set_restore->index);
++ DP("index %u, restore_index %u", index, set_restore->index);
++ if (index != set_restore->index)
++ return line;
++ /* Try to restore members data */
++ set = ip_set_list[index];
++ members_size = 0;
++ DP("members_size %u reqsize %u",
++ set_restore->members_size, set->type->reqsize);
++ while (members_size + set->type->reqsize <=
++ set_restore->members_size) {
++ line++;
++ DP("members: %u, line %u", members_size, line);
++ res = __ip_set_addip(index,
++ data + used + members_size,
++ set->type->reqsize);
++ if (!(res == 0 || res == -EEXIST))
++ return line;
++ members_size += set->type->reqsize;
++ }
++
++ DP("members_size %u %u",
++ set_restore->members_size, members_size);
++ if (members_size != set_restore->members_size)
++ return line++;
++ used += set_restore->members_size;
++ }
++
++ bindings:
++ /* Loop to restore bindings */
++ while (used < len) {
++ line++;
++
++ DP("restore binding, line %u", line);
++ /* Get and ensure size */
++ if (used + sizeof(struct ip_set_hash_save) > len)
++ return line;
++ hash_save = (struct ip_set_hash_save *) (data + used);
++ used += sizeof(struct ip_set_hash_save);
++
++ /* hash_save->id is used to store the index */
++ index = ip_set_find_byindex(hash_save->id);
++ DP("restore binding index %u, id %u, %u -> %u",
++ index, hash_save->id, hash_save->ip, hash_save->binding);
++ if (index != hash_save->id)
++ return line;
++ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
++ DP("corrupt binding set index %u", hash_save->binding);
++ return line;
++ }
++ set = ip_set_list[hash_save->id];
++ /* Null valued IP means default binding */
++ if (hash_save->ip)
++ res = ip_set_hash_add(set->id,
++ hash_save->ip,
++ hash_save->binding);
++ else {
++ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
++ write_lock_bh(&ip_set_lock);
++ set->binding = hash_save->binding;
++ __ip_set_get(set->binding);
++ write_unlock_bh(&ip_set_lock);
++ DP("default binding: %u", set->binding);
++ }
++ if (res != 0)
++ return line;
++ }
++ if (used != len)
++ return line;
++
++ return 0;
++}
++
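++/* setsockopt() handler: serves CREATE/DESTROY/FLUSH/RENAME/SWAP requests
++ * and dispatches add/del/test/bind/unbind/test_bind via adtfn_table. */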
++static int
++ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
++{
++ void *data;
++ int res = 0; /* Assume OK */
++ unsigned *op;
++ struct ip_set_req_adt *req_adt;
++ ip_set_id_t index = IP_SET_INVALID_ID;
++ int (*adtfn)(ip_set_id_t index,
++ const void *data, size_t size);
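++	/* Handlers for the add/del/test/bind/unbind/test_bind requests,
++	 * indexed below by *op - IP_SET_OP_ADD_IP. */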
++ struct fn_table {
++ int (*fn)(ip_set_id_t index,
++ const void *data, size_t size);
++ } adtfn_table[] =
++ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
++ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
++ };
++
++ DP("optval=%d, user=%p, len=%d", optval, user, len);
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (optval != SO_IP_SET)
++ return -EBADF;
++ if (len <= sizeof(unsigned)) {
++ ip_set_printk("short userdata (want >%zu, got %u)",
++ sizeof(unsigned), len);
++ return -EINVAL;
++ }
++ data = vmalloc(len);
++ if (!data) {
++ DP("out of mem for %u bytes", len);
++ return -ENOMEM;
++ }
++ if (copy_from_user(data, user, len) != 0) {
++ res = -EFAULT;
++ goto done;
++ }
++ if (down_interruptible(&ip_set_app_mutex)) {
++ res = -EINTR;
++ goto done;
++ }
++
++ op = (unsigned *)data;
++ DP("op=%x", *op);
++
++ if (*op < IP_SET_OP_VERSION) {
++ /* Check the version at the beginning of operations */
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++ res = -EPROTO;
++ goto done;
++ }
++ }
++
++ switch (*op) {
++ case IP_SET_OP_CREATE:{
++ struct ip_set_req_create *req_create
++ = (struct ip_set_req_create *) data;
++
++ if (len < sizeof(struct ip_set_req_create)) {
++ ip_set_printk("short CREATE data (want >=%zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++ res = ip_set_create(req_create->name,
++ req_create->typename,
++ IP_SET_INVALID_ID,
++ data + sizeof(struct ip_set_req_create),
++ len - sizeof(struct ip_set_req_create));
++ goto done;
++ }
++ case IP_SET_OP_DESTROY:{
++ struct ip_set_req_std *req_destroy
++ = (struct ip_set_req_std *) data;
++
++ if (len != sizeof(struct ip_set_req_std)) {
++ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
++ sizeof(struct ip_set_req_std), len);
++ res = -EINVAL;
++ goto done;
++ }
++ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ /* Destroy all sets */
++ index = IP_SET_INVALID_ID;
++ } else {
++ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_destroy->name);
++
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++
++ res = ip_set_destroy(index);
++ goto done;
++ }
++ case IP_SET_OP_FLUSH:{
++ struct ip_set_req_std *req_flush =
++ (struct ip_set_req_std *) data;
++
++ if (len != sizeof(struct ip_set_req_std)) {
++ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
++ sizeof(struct ip_set_req_std), len);
++ res = -EINVAL;
++ goto done;
++ }
++ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all sets */
++ index = IP_SET_INVALID_ID;
++ } else {
++ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_flush->name);
++
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ res = ip_set_flush(index);
++ goto done;
++ }
++ case IP_SET_OP_RENAME:{
++ struct ip_set_req_create *req_rename
++ = (struct ip_set_req_create *) data;
++
++ if (len != sizeof(struct ip_set_req_create)) {
++ ip_set_printk("invalid RENAME data (want %zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ index = ip_set_find_byname(req_rename->name);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ res = ip_set_rename(index, req_rename->typename);
++ goto done;
++ }
++ case IP_SET_OP_SWAP:{
++ struct ip_set_req_create *req_swap
++ = (struct ip_set_req_create *) data;
++ ip_set_id_t to_index;
++
++ if (len != sizeof(struct ip_set_req_create)) {
++ ip_set_printk("invalid SWAP data (want %zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ index = ip_set_find_byname(req_swap->name);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ to_index = ip_set_find_byname(req_swap->typename);
++ if (to_index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ res = ip_set_swap(index, to_index);
++ goto done;
++ }
++ default:
++ break; /* Set identified by id */
++ }
++
++	/* Here we may have add/del/test/bind/unbind/test_bind operations */
++ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
++ res = -EBADMSG;
++ goto done;
++ }
++ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
++
++ if (len < sizeof(struct ip_set_req_adt)) {
++ ip_set_printk("short data in adt request (want >=%zu, got %u)",
++ sizeof(struct ip_set_req_adt), len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_adt = (struct ip_set_req_adt *) data;
++
++ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
++ if (!(*op == IP_SET_OP_UNBIND_SET
++ && req_adt->index == IP_SET_INVALID_ID)) {
++ index = ip_set_find_byindex(req_adt->index);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ res = adtfn(index, data, len);
++
++ done:
++ up(&ip_set_app_mutex);
++ vfree(data);
++ if (res > 0)
++ res = 0;
++ DP("final result %d", res);
++ return res;
++}
++
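++/* getsockopt() handler: answers VERSION, name/index lookups, MAX_SETS and
++ * the LIST/SAVE/RESTORE requests, copying the result back to userspace. */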
++static int
++ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
++{
++ int res = 0;
++ unsigned *op;
++ ip_set_id_t index = IP_SET_INVALID_ID;
++ void *data;
++ int copylen = *len;
++
++ DP("optval=%d, user=%p, len=%d", optval, user, *len);
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (optval != SO_IP_SET)
++ return -EBADF;
++ if (*len < sizeof(unsigned)) {
++ ip_set_printk("short userdata (want >=%zu, got %d)",
++ sizeof(unsigned), *len);
++ return -EINVAL;
++ }
++ data = vmalloc(*len);
++ if (!data) {
++ DP("out of mem for %d bytes", *len);
++ return -ENOMEM;
++ }
++ if (copy_from_user(data, user, *len) != 0) {
++ res = -EFAULT;
++ goto done;
++ }
++ if (down_interruptible(&ip_set_app_mutex)) {
++ res = -EINTR;
++ goto done;
++ }
++
++ op = (unsigned *) data;
++ DP("op=%x", *op);
++
++ if (*op < IP_SET_OP_VERSION) {
++ /* Check the version at the beginning of operations */
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++ res = -EPROTO;
++ goto done;
++ }
++ }
++
++ switch (*op) {
++ case IP_SET_OP_VERSION: {
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++
++ if (*len != sizeof(struct ip_set_req_version)) {
++ ip_set_printk("invalid VERSION (want %zu, got %d)",
++ sizeof(struct ip_set_req_version),
++ *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_version->version = IP_SET_PROTOCOL_VERSION;
++ res = copy_to_user(user, req_version,
++ sizeof(struct ip_set_req_version));
++ goto done;
++ }
++ case IP_SET_OP_GET_BYNAME: {
++ struct ip_set_req_get_set *req_get
++ = (struct ip_set_req_get_set *) data;
++
++ if (*len != sizeof(struct ip_set_req_get_set)) {
++ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
++ sizeof(struct ip_set_req_get_set), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_get->set.name);
++ req_get->set.index = index;
++ goto copy;
++ }
++ case IP_SET_OP_GET_BYINDEX: {
++ struct ip_set_req_get_set *req_get
++ = (struct ip_set_req_get_set *) data;
++
++ if (*len != sizeof(struct ip_set_req_get_set)) {
++ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
++ sizeof(struct ip_set_req_get_set), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byindex(req_get->set.index);
++ strncpy(req_get->set.name,
++ index == IP_SET_INVALID_ID ? ""
++ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
++ goto copy;
++ }
++ case IP_SET_OP_ADT_GET: {
++ struct ip_set_req_adt_get *req_get
++ = (struct ip_set_req_adt_get *) data;
++
++ if (*len != sizeof(struct ip_set_req_adt_get)) {
++ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
++ sizeof(struct ip_set_req_adt_get), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_get->set.name);
++ if (index != IP_SET_INVALID_ID) {
++ req_get->set.index = index;
++ strncpy(req_get->typename,
++ ip_set_list[index]->type->typename,
++ IP_SET_MAXNAMELEN - 1);
++ } else {
++ res = -ENOENT;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_MAX_SETS: {
++ struct ip_set_req_max_sets *req_max_sets
++ = (struct ip_set_req_max_sets *) data;
++ ip_set_id_t i;
++
++ if (*len != sizeof(struct ip_set_req_max_sets)) {
++ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
++ sizeof(struct ip_set_req_max_sets), *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ req_max_sets->set.index = IP_SET_INVALID_ID;
++ } else {
++ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_max_sets->set.index =
++ ip_set_find_byname(req_max_sets->set.name);
++ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ req_max_sets->max_sets = ip_set_max;
++ req_max_sets->sets = 0;
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL)
++ req_max_sets->sets++;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_LIST_SIZE:
++ case IP_SET_OP_SAVE_SIZE: {
++ struct ip_set_req_setnames *req_setnames
++ = (struct ip_set_req_setnames *) data;
++ struct ip_set_name_list *name_list;
++ struct ip_set *set;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_setnames)) {
++ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_setnames), *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_setnames->size = 0;
++ used = sizeof(struct ip_set_req_setnames);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] == NULL)
++ continue;
++ name_list = (struct ip_set_name_list *)
++ (data + used);
++ used += sizeof(struct ip_set_name_list);
++ if (used > copylen) {
++ res = -EAGAIN;
++ goto done;
++ }
++ set = ip_set_list[i];
++ /* Fill in index, name, etc. */
++ name_list->index = i;
++ name_list->id = set->id;
++ strncpy(name_list->name,
++ set->name,
++ IP_SET_MAXNAMELEN - 1);
++ strncpy(name_list->typename,
++ set->type->typename,
++ IP_SET_MAXNAMELEN - 1);
++ DP("filled %s of type %s, index %u\n",
++ name_list->name, name_list->typename,
++ name_list->index);
++ if (!(req_setnames->index == IP_SET_INVALID_ID
++ || req_setnames->index == i))
++ continue;
++ /* Update size */
++ switch (*op) {
++ case IP_SET_OP_LIST_SIZE: {
++ req_setnames->size += sizeof(struct ip_set_list)
++ + set->type->header_size
++ + set->type->list_members_size(set);
++ /* Sets are identified by id in the hash */
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ set->id, &req_setnames->size);
++ break;
++ }
++ case IP_SET_OP_SAVE_SIZE: {
++ req_setnames->size += sizeof(struct ip_set_save)
++ + set->type->header_size
++ + set->type->list_members_size(set);
++ FOREACH_HASH_DO(__set_hash_bindings_size_save,
++ set->id, &req_setnames->size);
++ break;
++ }
++ default:
++ break;
++ }
++ }
++ if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_LIST: {
++ struct ip_set_req_list *req_list
++ = (struct ip_set_req_list *) data;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_list)) {
++ ip_set_printk("short LIST (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_list), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ index = req_list->index;
++ if (index != IP_SET_INVALID_ID
++ && ip_set_find_byindex(index) != index) {
++ res = -ENOENT;
++ goto done;
++ }
++ used = 0;
++ if (index == IP_SET_INVALID_ID) {
++ /* List all sets */
++ for (i = 0; i < ip_set_max && res == 0; i++) {
++ if (ip_set_list[i] != NULL)
++ res = ip_set_list_set(i, data, &used, *len);
++ }
++ } else {
++ /* List an individual set */
++ res = ip_set_list_set(index, data, &used, *len);
++ }
++ if (res != 0)
++ goto done;
++ else if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_SAVE: {
++ struct ip_set_req_list *req_save
++ = (struct ip_set_req_list *) data;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_list)) {
++ ip_set_printk("short SAVE (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_list), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ index = req_save->index;
++ if (index != IP_SET_INVALID_ID
++ && ip_set_find_byindex(index) != index) {
++ res = -ENOENT;
++ goto done;
++ }
++ used = 0;
++ if (index == IP_SET_INVALID_ID) {
++ /* Save all sets */
++ for (i = 0; i < ip_set_max && res == 0; i++) {
++ if (ip_set_list[i] != NULL)
++ res = ip_set_save_set(i, data, &used, *len);
++ }
++ } else {
++ /* Save an individual set */
++ res = ip_set_save_set(index, data, &used, *len);
++ }
++ if (res == 0)
++ res = ip_set_save_bindings(index, data, &used, *len);
++
++ if (res != 0)
++ goto done;
++ else if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_RESTORE: {
++ struct ip_set_req_setnames *req_restore
++ = (struct ip_set_req_setnames *) data;
++ int line;
++
++ if (*len < sizeof(struct ip_set_req_setnames)
++ || *len != req_restore->size) {
++ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
++ req_restore->size, *len);
++ res = -EINVAL;
++ goto done;
++ }
++ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
++ req_restore->size - sizeof(struct ip_set_req_setnames));
++ DP("ip_set_restore: %u", line);
++ if (line != 0) {
++ res = -EAGAIN;
++ req_restore->size = line;
++ copylen = sizeof(struct ip_set_req_setnames);
++ goto copy;
++ }
++ goto done;
++ }
++ default:
++ res = -EBADMSG;
++ goto done;
++ } /* end of switch(op) */
++
++ copy:
++ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ && ip_set_list[index]
++ ? ip_set_list[index]->name
++ : ":all:", copylen);
++ res = copy_to_user(user, data, copylen);
++
++ done:
++ up(&ip_set_app_mutex);
++ vfree(data);
++ if (res > 0)
++ res = 0;
++ DP("final result %d", res);
++ return res;
++}
++
++static struct nf_sockopt_ops so_set = {
++ .pf = PF_INET,
++ .set_optmin = SO_IP_SET,
++ .set_optmax = SO_IP_SET + 1,
++ .set = &ip_set_sockfn_set,
++ .get_optmin = SO_IP_SET,
++ .get_optmax = SO_IP_SET + 1,
++ .get = &ip_set_sockfn_get,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ .owner = THIS_MODULE,
++#endif
++};
++
++static int max_sets, hash_size;
++module_param(max_sets, int, 0600);
++MODULE_PARM_DESC(max_sets, "maximal number of sets");
++module_param(hash_size, int, 0600);
++MODULE_PARM_DESC(hash_size, "hash size for bindings");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("module implementing core IP set support");
++
++static int __init ip_set_init(void)
++{
++ int res;
++ ip_set_id_t i;
++
++ get_random_bytes(&ip_set_hash_random, 4);
++ if (max_sets)
++ ip_set_max = max_sets;
++ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
++ if (!ip_set_list) {
++ printk(KERN_ERR "Unable to create ip_set_list\n");
++ return -ENOMEM;
++ }
++ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
++ if (hash_size)
++ ip_set_bindings_hash_size = hash_size;
++ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
++ if (!ip_set_hash) {
++ printk(KERN_ERR "Unable to create ip_set_hash\n");
++ vfree(ip_set_list);
++ return -ENOMEM;
++ }
++ for (i = 0; i < ip_set_bindings_hash_size; i++)
++ INIT_LIST_HEAD(&ip_set_hash[i]);
++
++ INIT_LIST_HEAD(&set_type_list);
++
++ res = nf_register_sockopt(&so_set);
++ if (res != 0) {
++ ip_set_printk("SO_SET registry failed: %d", res);
++ vfree(ip_set_list);
++ vfree(ip_set_hash);
++ return res;
++ }
++ return 0;
++}
++
++static void __exit ip_set_fini(void)
++{
++ /* There can't be any existing set or binding */
++ nf_unregister_sockopt(&so_set);
++ vfree(ip_set_list);
++ vfree(ip_set_hash);
++ DP("these are the famous last words");
++}
++
++EXPORT_SYMBOL(ip_set_register_set_type);
++EXPORT_SYMBOL(ip_set_unregister_set_type);
++
++EXPORT_SYMBOL(ip_set_get_byname);
++EXPORT_SYMBOL(ip_set_get_byindex);
++EXPORT_SYMBOL(ip_set_put);
++
++EXPORT_SYMBOL(ip_set_addip_kernel);
++EXPORT_SYMBOL(ip_set_delip_kernel);
++EXPORT_SYMBOL(ip_set_testip_kernel);
++
++module_init(ip_set_init);
++module_exit(ip_set_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_iphash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,429 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_iphash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
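++/* Look up the masked address: try each of the 'probes' hash functions in
++ * turn and return the matching slot id, or UINT_MAX if it is not stored. */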
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = ip & map->netmask;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ /* No shortcut at testing - there can be deleted
++ * entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == *hash_ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = *hash_ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __addip((struct ip_set_iphash *) set->data,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
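++/* Rehash after an add failed with -EAGAIN: grow the hash by 'resize'
++ * percent and reinsert every element, repeating until everything fits. */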
++static int retry(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t hash_ip, *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_iphash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new hash size */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_iphash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iphash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ tmp->netmask = map->netmask;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_iphash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __addip(tmp, *elem, &hash_ip);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t id, *elem;
++
++ if (!ip)
++ return -ERANGE;
++
++ id = hash_id(set, ip, hash_ip);
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iphash_create *req =
++ (struct ip_set_req_iphash_create *) data;
++ struct ip_set_iphash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_iphash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_iphash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iphash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ map->netmask = req->netmask;
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_req_iphash_create *header =
++ (struct ip_set_req_iphash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++ header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_iphash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_iphash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iphash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_iphash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_iphash);
++}
++
++static void __exit ip_set_iphash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_iphash);
++}
++
++module_init(ip_set_iphash_init);
++module_exit(ip_set_iphash_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_ipmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,336 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the single bitmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipmap.h>
++
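++/* Bit position of an address in the bitmap: its distance from first_ip,
++ * counted in blocks of 'hosts' addresses (one bit per netmask block). */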
++static inline ip_set_ip_t
++ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
++{
++ return (ip - map->first_ip)/map->hosts;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
++ return -EEXIST;
++
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
++ return __addip(set, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __addip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
++ return -EEXIST;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_ipmap_create *req =
++ (struct ip_set_req_ipmap_create *) data;
++ struct ip_set_ipmap *map;
++
++ if (size != sizeof(struct ip_set_req_ipmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++ HIPQUAD(req->from), HIPQUAD(req->to));
++
++ if (req->from > req->to) {
++ DP("bad ip range");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipmap));
++ return -ENOMEM;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ map->netmask = req->netmask;
++
++ if (req->netmask == 0xFFFFFFFF) {
++ map->hosts = 1;
++ map->sizeid = map->last_ip - map->first_ip + 1;
++ } else {
++ unsigned int mask_bits, netmask_bits;
++ ip_set_ip_t mask;
++
++ map->first_ip &= map->netmask; /* Should we better bark? */
++
++ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
++ netmask_bits = mask_to_bits(map->netmask);
++
++ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
++ || netmask_bits <= mask_bits)
++ return -ENOEXEC;
++
++ DP("mask_bits %u, netmask_bits %u",
++ mask_bits, netmask_bits);
++ map->hosts = 2 << (32 - netmask_bits - 1);
++ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
++ }
++ if (map->sizeid > MAX_RANGE + 1) {
++ ip_set_printk("range too big (max %d addresses)",
++ MAX_RANGE+1);
++ kfree(map);
++ return -ENOEXEC;
++ }
++ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
++ newbytes = bitmap_bytes(0, map->sizeid - 1);
++ map->members = kmalloc(newbytes, GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ kfree(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_req_ipmap_create *header =
++ (struct ip_set_req_ipmap_create *) data;
++
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++ header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ return bitmap_bytes(0, map->sizeid - 1);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ int bytes = bitmap_bytes(0, map->sizeid - 1);
++
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_ipmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_ipmap),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_ipmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipmap type of IP sets");
++
++static int __init ip_set_ipmap_init(void)
++{
++ return ip_set_register_set_type(&ip_set_ipmap);
++}
++
++static void __exit ip_set_ipmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_ipmap);
++}
++
++module_init(ip_set_ipmap_init);
++module_exit(ip_set_ipmap_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_ipporthash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,581 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++
++static int limit = MAX_RANGE;
++
++/* We must handle non-linear skbs */
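++/* Returns the TCP/UDP source or destination port (host order) selected by
++ * IPSET_SRC in the flags, or INVALID_PORT for fragments and other protocols. */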
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ struct iphdr *iph = ip_hdr(skb);
++#else
++ struct iphdr *iph = skb->nh.iph;
++#endif
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++
++static inline __u32
++jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
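++/* Pack (ip, port) into one 32-bit key: the offset of ip from first_ip in
++ * the upper 16 bits and the port in the lower 16 bits. */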
++#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map =
++ (struct ip_set_ipporthash *) set->data;
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = HASH_IP(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ /* No shortcut at testing - there can be deleted
++ * entries. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++ int res;
++
++ if (flags[index+1] == 0)
++ return 0;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return 0;
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++ return (res < 0 ? 0 : res);
++
++}
++
++static inline int
++__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == hash_ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = hash_ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = HASH_IP(map, ip, port);
++
++ return __add_haship(map, *hash_ip);
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __addip((struct ip_set_ipporthash *) set->data,
++ req->ip, req->port, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++
++ if (flags[index+1] == 0)
++ return -EINVAL;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __addip((struct ip_set_ipporthash *) set->data,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_ipporthash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new hash size */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipporthash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __add_haship(tmp, *elem);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = hash_id(set, ip, port, hash_ip);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++
++ if (flags[index+1] == 0)
++ return -EINVAL;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_ipporthash_create *req =
++ (struct ip_set_req_ipporthash_create *) data;
++ struct ip_set_ipporthash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_ipporthash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipporthash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_req_ipporthash_create *header =
++ (struct ip_set_req_ipporthash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_ipporthash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_ipporthash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_ipporthash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_ipporthash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_ipporthash);
++}
++
++static void __exit ip_set_ipporthash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_ipporthash);
++}
++
++module_init(ip_set_ipporthash_init);
++module_exit(ip_set_ipporthash_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_iptree.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,612 @@
++/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the iptree type */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++
++#include <linux/netfilter_ipv4/ip_set_iptree.h>
++
++static int limit = MAX_RANGE;
++
++/* Garbage collection interval in seconds: */
++#define IPTREE_GC_TIME 5*60
++/* Sleep this many milliseconds before retrying to delete
++ * the gc timer when destroying/flushing a set */
++#define IPTREE_DESTROY_SLEEP 100
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *branch_cachep;
++static struct kmem_cache *leaf_cachep;
++#else
++static kmem_cache_t *branch_cachep;
++static kmem_cache_t *leaf_cachep;
++#endif
++
++#if defined(__LITTLE_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[3]; \
++ b = ((unsigned char *)addrp)[2]; \
++ c = ((unsigned char *)addrp)[1]; \
++ d = ((unsigned char *)addrp)[0]; \
++} while (0)
++#elif defined(__BIG_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[0]; \
++ b = ((unsigned char *)addrp)[1]; \
++ c = ((unsigned char *)addrp)[2]; \
++ d = ((unsigned char *)addrp)[3]; \
++} while (0)
++#else
++#error "Please fix asm/byteorder.h"
++#endif /* __LITTLE_ENDIAN */
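/*
 * Editorial note, not part of the patch: ABCD() above only splits a host-order
 * IPv4 address into its four octets, which then index the three branch levels
 * and the leaf slot of the tree. An endian-independent userspace equivalent
 * using shifts:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8) | 42u; /* 192.168.1.42 */
	unsigned char a = ip >> 24, b = ip >> 16, c = ip >> 8, d = ip;

	printf("%u.%u.%u.%u -> tree[%u]->tree[%u]->tree[%u], slot %u\n",
	       a, b, c, d, a, b, c, d);
	return 0;
}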
++
++#define TESTIP_WALK(map, elem, branch) do { \
++ if ((map)->tree[elem]) { \
++ branch = (map)->tree[elem]; \
++ } else \
++ return 0; \
++} while (0)
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++
++ if (!ip)
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
++ TESTIP_WALK(map, a, btree);
++ TESTIP_WALK(btree, b, ctree);
++ TESTIP_WALK(ctree, c, dtree);
++ DP("%lu %lu", dtree->expires[d], jiffies);
++ return dtree->expires[d]
++ && (!map->timeout
++ || time_after(dtree->expires[d], jiffies));
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res;
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++ return (res < 0 ? 0 : res);
++}
++
++#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
++ if ((map)->tree[elem]) { \
++ DP("found %u", elem); \
++ branch = (map)->tree[elem]; \
++ } else { \
++ branch = (type *) \
++ kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (branch == NULL) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[elem] = branch; \
++ DP("alloc %u", elem); \
++ } \
++} while (0)
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++ int ret = 0;
++
++ if (!ip || map->elements >= limit)
++ /* We could call the garbage collector
++ * but it's probably overkill */
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
++ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
++ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
++ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++ ret = -EEXIST;
++ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
++ /* Lottery: the computed expiry happens to be 0, which would look unset */
++ if (dtree->expires[d] == 0)
++ dtree->expires[d] = 1;
++ DP("%u %lu", d, dtree->expires[d]);
++ if (ret == 0)
++ map->elements++;
++ return ret;
++}
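/*
 * Editorial note, not part of the patch: __addip() above records either the
 * literal value 1 (set has no timeout) or jiffies + timeout*HZ, and an entry
 * counts as present while the stored value is non-zero and not in the past.
 * A simplified userspace model of that bookkeeping (toy_jiffies/TOY_HZ are
 * stand-ins for the kernel's jiffies/HZ):
 */
#include <stdio.h>

#define TOY_HZ 100UL

static unsigned long toy_jiffies;

static unsigned long mark_present(unsigned int timeout)
{
	unsigned long expires = timeout ? toy_jiffies + timeout * TOY_HZ : 1;

	if (expires == 0)		/* the wrap-around case handled above */
		expires = 1;
	return expires;
}

static int is_present(unsigned long expires, int set_has_timeout)
{
	/* time_after(expires, jiffies) modeled with a signed difference */
	return expires && (!set_has_timeout || (long)(expires - toy_jiffies) > 0);
}

int main(void)
{
	unsigned long e = mark_present(30);	/* entry valid for 30 "seconds" */

	toy_jiffies += 10 * TOY_HZ;
	printf("after 10s: %d\n", is_present(e, 1));
	toy_jiffies += 30 * TOY_HZ;
	printf("after 40s: %d\n", is_present(e, 1));
	return 0;
}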
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
++ return __addip(set, req->ip,
++ req->timeout ? req->timeout : map->timeout,
++ hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ return __addip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ map->timeout,
++ hash_ip);
++}
++
++#define DELIP_WALK(map, elem, branch) do { \
++ if ((map)->tree[elem]) { \
++ branch = (map)->tree[elem]; \
++ } else \
++ return -EEXIST; \
++} while (0)
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++
++ if (!ip)
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DELIP_WALK(map, a, btree);
++ DELIP_WALK(btree, b, ctree);
++ DELIP_WALK(ctree, c, dtree);
++
++ if (dtree->expires[d]) {
++ dtree->expires[d] = 0;
++ map->elements--;
++ return 0;
++ }
++ return -EEXIST;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++ for (i = 0; i < 256; i++) { \
++ if (!(map)->tree[i]) \
++ continue; \
++ branch = (map)->tree[i]
++
++#define LOOP_WALK_END }
++
++static void ip_tree_gc(unsigned long ul_set)
++{
++ struct ip_set *set = (void *) ul_set;
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ unsigned char i,j,k;
++
++ i = j = k = 0;
++ DP("gc: %s", set->name);
++ write_lock_bh(&set->lock);
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]) {
++ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
++ a, b, c, d,
++ dtree->expires[d], jiffies);
++ if (map->timeout
++ && time_before(dtree->expires[d], jiffies)) {
++ dtree->expires[d] = 0;
++ map->elements--;
++ } else
++ k = 1;
++ }
++ }
++ if (k == 0) {
++ DP("gc: %s: leaf %u %u %u empty",
++ set->name, a, b, c);
++ kmem_cache_free(leaf_cachep, dtree);
++ ctree->tree[c] = NULL;
++ } else {
++ DP("gc: %s: leaf %u %u %u not empty",
++ set->name, a, b, c);
++ j = 1;
++ k = 0;
++ }
++ LOOP_WALK_END;
++ if (j == 0) {
++ DP("gc: %s: branch %u %u empty",
++ set->name, a, b);
++ kmem_cache_free(branch_cachep, ctree);
++ btree->tree[b] = NULL;
++ } else {
++ DP("gc: %s: branch %u %u not empty",
++ set->name, a, b);
++ i = 1;
++ j = k = 0;
++ }
++ LOOP_WALK_END;
++ if (i == 0) {
++ DP("gc: %s: branch %u empty",
++ set->name, a);
++ kmem_cache_free(branch_cachep, btree);
++ map->tree[a] = NULL;
++ } else {
++ DP("gc: %s: branch %u not empty",
++ set->name, a);
++ i = j = k = 0;
++ }
++ LOOP_WALK_END;
++ write_unlock_bh(&set->lock);
++
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static inline void init_gc_timer(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ /* Even if there is no timeout for the entries,
++ * we still have to call gc, because delete
++ * does not clean up empty branches */
++ map->gc_interval = IPTREE_GC_TIME;
++ init_timer(&map->gc);
++ map->gc.data = (unsigned long) set;
++ map->gc.function = ip_tree_gc;
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iptree_create *req =
++ (struct ip_set_req_iptree_create *) data;
++ struct ip_set_iptree *map;
++
++ if (size != sizeof(struct ip_set_req_iptree_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree_create),
++ size);
++ return -EINVAL;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %zu bytes",
++ sizeof(struct ip_set_iptree));
++ return -ENOMEM;
++ }
++ memset(map, 0, sizeof(*map));
++ map->timeout = req->timeout;
++ map->elements = 0;
++ set->data = map;
++
++ init_gc_timer(set);
++
++ return 0;
++}
++
++static void __flush(struct ip_set_iptree *map)
++{
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ kmem_cache_free(leaf_cachep, dtree);
++ LOOP_WALK_END;
++ kmem_cache_free(branch_cachep, ctree);
++ LOOP_WALK_END;
++ kmem_cache_free(branch_cachep, btree);
++ LOOP_WALK_END;
++ map->elements = 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ /* gc might be running */
++ while (!del_timer(&map->gc))
++ msleep(IPTREE_DESTROY_SLEEP);
++ __flush(map);
++ kfree(map);
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ unsigned int timeout = map->timeout;
++
++ /* gc might be running */
++ while (!del_timer(&map->gc))
++ msleep(IPTREE_DESTROY_SLEEP);
++ __flush(map);
++ memset(map, 0, sizeof(*map));
++ map->timeout = timeout;
++
++ init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_req_iptree_create *header =
++ (struct ip_set_req_iptree_create *) data;
++
++ header->timeout = map->timeout;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ unsigned int count = 0;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++ count++;
++ }
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++
++ DP("members %u", count);
++ return (count * sizeof(struct ip_set_req_iptree));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ size_t offset = 0;
++ struct ip_set_req_iptree *entry;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
++ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
++ entry->timeout = !map->timeout ? 0
++ : (dtree->expires[d] - jiffies)/HZ;
++ offset += sizeof(struct ip_set_req_iptree);
++ }
++ }
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++}
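/*
 * Editorial note, not part of the patch: list_members() above packs the walk
 * indices back into an address with (a<<24)|(b<<16)|(c<<8)|d and reports the
 * remaining lifetime as (expires - jiffies)/HZ seconds. The two conversions
 * in isolation:
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_HZ 100UL

int main(void)
{
	unsigned int a = 10, b = 0, c = 8, d = 1;
	uint32_t ip = (a << 24) | (b << 16) | (c << 8) | d;
	unsigned long jiffies_now = 5000, expires = 5000 + 42 * TOY_HZ;

	printf("ip=0x%08x remaining=%lus\n",
	       (unsigned)ip, (expires - jiffies_now) / TOY_HZ);
	return 0;
}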
++
++static struct ip_set_type ip_set_iptree = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_iptree),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iptree_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptree type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_iptree_init(void)
++{
++ int ret;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ branch_cachep = kmem_cache_create("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb),
++ 0, 0, NULL);
++#else
++ branch_cachep = kmem_cache_create("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb),
++ 0, 0, NULL, NULL);
++#endif
++ if (!branch_cachep) {
++ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ leaf_cachep = kmem_cache_create("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed),
++ 0, 0, NULL);
++#else
++ leaf_cachep = kmem_cache_create("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed),
++ 0, 0, NULL, NULL);
++#endif
++ if (!leaf_cachep) {
++ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
++ ret = -ENOMEM;
++ goto free_branch;
++ }
++ ret = ip_set_register_set_type(&ip_set_iptree);
++ if (ret == 0)
++ goto out;
++
++ kmem_cache_destroy(leaf_cachep);
++ free_branch:
++ kmem_cache_destroy(branch_cachep);
++ out:
++ return ret;
++}
++
++static void __exit ip_set_iptree_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_iptree);
++ kmem_cache_destroy(leaf_cachep);
++ kmem_cache_destroy(branch_cachep);
++}
++
++module_init(ip_set_iptree_init);
++module_exit(ip_set_iptree_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_iptreemap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_iptreemap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,829 @@
++/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++/* This module implements the iptreemap ipset type. It uses bitmaps to
++ * represent every single IPv4 address as a single bit. The bitmaps are managed
++ * in a tree structure, where the first three octets of an address are used
++ * as an index to find the bitmap and the last octet is used as the bit number.
++ */
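/*
 * Editorial note, not part of the patch: here the leaf is a 256-bit bitmap
 * (32 bytes), so the last octet selects a single bit instead of an array
 * slot. A byte-oriented userspace analog of the set/test operations (the
 * module itself uses the kernel's set_bit()/test_bit() helpers):
 */
#include <stdio.h>

static unsigned char leaf[32];		/* 256 bits, one per possible last octet */

static void toy_set(unsigned char d)  { leaf[d >> 3] |= 1u << (d & 7); }
static int  toy_test(unsigned char d) { return (leaf[d >> 3] >> (d & 7)) & 1; }

int main(void)
{
	toy_set(42);
	printf("bit 42: %d, bit 43: %d\n", toy_test(42), toy_test(43));
	return 0;
}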
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
++
++#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
++#define IPTREEMAP_DESTROY_SLEEP (100)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *cachep_b;
++static struct kmem_cache *cachep_c;
++static struct kmem_cache *cachep_d;
++#else
++static kmem_cache_t *cachep_b;
++static kmem_cache_t *cachep_c;
++static kmem_cache_t *cachep_d;
++#endif
++
++static struct ip_set_iptreemap_d *fullbitmap_d;
++static struct ip_set_iptreemap_c *fullbitmap_c;
++static struct ip_set_iptreemap_b *fullbitmap_b;
++
++#if defined(__LITTLE_ENDIAN)
++#define ABCD(a, b, c, d, addr) \
++ do { \
++ a = ((unsigned char *)addr)[3]; \
++ b = ((unsigned char *)addr)[2]; \
++ c = ((unsigned char *)addr)[1]; \
++ d = ((unsigned char *)addr)[0]; \
++ } while (0)
++#elif defined(__BIG_ENDIAN)
++#define ABCD(a, b, c, d, addr) \
++ do { \
++ a = ((unsigned char *)addr)[0]; \
++ b = ((unsigned char *)addr)[1]; \
++ c = ((unsigned char *)addr)[2]; \
++ d = ((unsigned char *)addr)[3]; \
++ } while (0)
++#else
++#error "Please fix asm/byteorder.h"
++#endif /* __LITTLE_ENDIAN */
++
++#define TESTIP_WALK(map, elem, branch, full) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) \
++ return 0; \
++ else if (branch == full) \
++ return 1; \
++ } while (0)
++
++#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) { \
++ branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (!branch) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[elem] = branch; \
++ } else if (branch == full) { \
++ return -EEXIST; \
++ } \
++ } while (0)
++
++#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
++ for (a = a1; a <= a2; a++) { \
++ branch = (map)->tree[a]; \
++ if (branch != full) { \
++ if ((a > a1 && a < a2) || (hint)) { \
++ if (branch) \
++ free(branch); \
++ (map)->tree[a] = full; \
++ continue; \
++ } else if (!branch) { \
++ branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (!branch) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[a] = branch; \
++ }
++
++#define ADDIP_RANGE_LOOP_END() \
++ } \
++ }
++
++#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) { \
++ return -EEXIST; \
++ } else if (branch == full) { \
++ branch = kmem_cache_alloc(cachep, flags); \
++ if (!branch) \
++ return -ENOMEM; \
++ memcpy(branch, full, sizeof(*full)); \
++ (map)->tree[elem] = branch; \
++ } \
++ } while (0)
++
++#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
++ for (a = a1; a <= a2; a++) { \
++ branch = (map)->tree[a]; \
++ if (branch) { \
++ if ((a > a1 && a < a2) || (hint)) { \
++ if (branch != full) \
++ free(branch); \
++ (map)->tree[a] = NULL; \
++ continue; \
++ } else if (branch == full) { \
++ branch = kmem_cache_alloc(cachep, flags); \
++ if (!branch) \
++ return -ENOMEM; \
++ memcpy(branch, full, sizeof(*branch)); \
++ (map)->tree[a] = branch; \
++ }
++
++#define DELIP_RANGE_LOOP_END() \
++ } \
++ }
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++ for (i = 0; i < 256; i++) { \
++ branch = (map)->tree[i]; \
++ if (likely(!branch)) \
++ continue;
++
++#define LOOP_WALK_END() \
++ }
++
++#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
++ count = -256; \
++ for (i = 0; i < 256; i++) { \
++ branch = (map)->tree[i]; \
++ if (likely(!branch)) \
++ continue; \
++ count++; \
++ if (branch == full) { \
++ count++; \
++ continue; \
++ }
++
++#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
++ if (-256 == count) { \
++ kmem_cache_free(cachep, branch); \
++ (map)->tree[i] = NULL; \
++ } else if (256 == count) { \
++ kmem_cache_free(cachep, branch); \
++ (map)->tree[i] = full; \
++ } \
++ }
++
++#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
++ for (i = 0; i < 256; i++) { \
++ if (!(map)->tree[i]) { \
++ if (inrange) { \
++ count++; \
++ inrange = 0; \
++ } \
++ continue; \
++ } \
++ branch = (map)->tree[i];
++
++#define LOOP_WALK_END_COUNT() \
++ }
++
++#define MIN(a, b) ((a) < (b) ? (a) : (b))
++#define MAX(a, b) ((a) > (b) ? (a) : (b))
++
++#define GETVALUE1(a, a1, b1, r) \
++ (a == a1 ? b1 : r)
++
++#define GETVALUE2(a, b, a1, b1, c1, r) \
++ (a == a1 && b == b1 ? c1 : r)
++
++#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
++ (a == a1 && b == b1 && c == c1 ? d1 : r)
++
++#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE1(a, a1, b1, 0) == 0 \
++ && GETVALUE1(a, a2, b2, 255) == 255 \
++ && c1 == 0 \
++ && c2 == 255 \
++ && d1 == 0 \
++ && d2 == 255 \
++ )
++
++#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
++ && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
++ && d1 == 0 \
++ && d2 == 255 \
++ )
++
++#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
++ && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
++ )
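/*
 * Editorial note, not part of the patch: GETVALUEn() clamps the loop bounds
 * of the next tree level -- the true boundary octet is only honoured while
 * the outer indices sit on the edge of the requested range, otherwise the
 * inner loop covers 0..255 -- and CHECKn() detects when a whole subtree is
 * covered so it can be replaced by the shared full bitmap. A userspace sketch
 * of the clamping for the second level:
 */
#include <stdio.h>

/* b-loop bounds for a given a while adding the range a1.b1.*.* .. a2.b2.*.* */
static void b_bounds(unsigned a, unsigned a1, unsigned b1,
		     unsigned a2, unsigned b2, unsigned *lo, unsigned *hi)
{
	*lo = (a == a1) ? b1 : 0;	/* GETVALUE1(a, a1, b1, 0) */
	*hi = (a == a2) ? b2 : 255;	/* GETVALUE1(a, a2, b2, 255) */
}

int main(void)
{
	unsigned a, lo, hi;

	/* range 10.20.0.0 .. 12.5.255.255: a runs over 10, 11, 12 */
	for (a = 10; a <= 12; a++) {
		b_bounds(a, 10, 20, 12, 5, &lo, &hi);
		printf("a=%u -> b in [%u, %u]\n", a, lo, hi);
	}
	return 0;
}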
++
++
++static inline void
++free_d(struct ip_set_iptreemap_d *map)
++{
++ kmem_cache_free(cachep_d, map);
++}
++
++static inline void
++free_c(struct ip_set_iptreemap_c *map)
++{
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int i;
++
++ LOOP_WALK_BEGIN(map, i, dtree) {
++ if (dtree != fullbitmap_d)
++ free_d(dtree);
++ } LOOP_WALK_END();
++
++ kmem_cache_free(cachep_c, map);
++}
++
++static inline void
++free_b(struct ip_set_iptreemap_b *map)
++{
++ struct ip_set_iptreemap_c *ctree;
++ unsigned int i;
++
++ LOOP_WALK_BEGIN(map, i, ctree) {
++ if (ctree != fullbitmap_c)
++ free_c(ctree);
++ } LOOP_WALK_END();
++
++ kmem_cache_free(cachep_b, map);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a, b, c, d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ TESTIP_WALK(map, a, btree, fullbitmap_b);
++ TESTIP_WALK(btree, b, ctree, fullbitmap_c);
++ TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
++
++ return !!test_bit(d, (void *) dtree->bitmap);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __testip(set, req->start, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++ int res;
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a, b, c, d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
++ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
++ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
++
++ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ return -EEXIST;
++
++ set_bit(b, (void *) btree->dirty);
++
++ return 0;
++}
++
++static inline int
++__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d;
++ unsigned char a1, b1, c1, d1;
++ unsigned char a2, b2, c2, d2;
++
++ if (start == end)
++ return __addip_single(set, start, hash_ip);
++
++ *hash_ip = start;
++
++ ABCD(a1, b1, c1, d1, &start);
++ ABCD(a2, b2, c2, d2, &end);
++
++ /* This is sooo ugly... */
++ ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
++ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
++ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
++ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
++ set_bit(d, (void *) dtree->bitmap);
++ set_bit(b, (void *) btree->dirty);
++ } ADDIP_RANGE_LOOP_END();
++ } ADDIP_RANGE_LOOP_END();
++ } ADDIP_RANGE_LOOP_END();
++
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++
++ return __addip_single(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a,b,c,d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
++ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
++ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
++
++ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ return -EEXIST;
++
++ set_bit(b, (void *) btree->dirty);
++
++ return 0;
++}
++
++static inline int
++__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d;
++ unsigned char a1, b1, c1, d1;
++ unsigned char a2, b2, c2, d2;
++
++ if (start == end)
++ return __delip_single(set, start, hash_ip, flags);
++
++ *hash_ip = start;
++
++ ABCD(a1, b1, c1, d1, &start);
++ ABCD(a2, b2, c2, d2, &end);
++
++ /* This is sooo ugly... */
++ DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
++ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
++ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
++ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
++ clear_bit(d, (void *) dtree->bitmap);
++ set_bit(b, (void *) btree->dirty);
++ } DELIP_RANGE_LOOP_END();
++ } DELIP_RANGE_LOOP_END();
++ } DELIP_RANGE_LOOP_END();
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
++}
++
++static int
++delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++ return __delip_single(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip,
++ GFP_ATOMIC);
++}
++
++/* Check the status of the bitmap
++ * -1 == all bits cleared
++ * 1 == all bits set
++ * 0 == anything else
++ */
++static inline int
++bitmap_status(struct ip_set_iptreemap_d *dtree)
++{
++ unsigned char first = dtree->bitmap[0];
++ int a;
++
++ for (a = 1; a < 32; a++)
++ if (dtree->bitmap[a] != first)
++ return 0;
++
++ return (first == 0 ? -1 : (first == 255 ? 1 : 0));
++}
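/*
 * Editorial note, not part of the patch: bitmap_status() only needs to tell
 * "all clear", "all set" and "mixed" apart, so comparing every byte of the
 * 32-byte leaf with its first byte is sufficient. Userspace equivalent:
 */
#include <stdio.h>

static int toy_bitmap_status(const unsigned char bitmap[32])
{
	unsigned char first = bitmap[0];
	int i;

	for (i = 1; i < 32; i++)
		if (bitmap[i] != first)
			return 0;			/* mixed */
	return first == 0 ? -1 : (first == 0xff ? 1 : 0);
}

int main(void)
{
	unsigned char all_clear[32] = { 0 }, mixed[32] = { 0 };

	mixed[5] = 0x10;
	printf("%d %d\n", toy_bitmap_status(all_clear), toy_bitmap_status(mixed));
	return 0;
}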
++
++static void
++gc(unsigned long addr)
++{
++ struct ip_set *set = (struct ip_set *) addr;
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c;
++ int i, j, k;
++
++ write_lock_bh(&set->lock);
++
++ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
++ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
++ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ continue;
++ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
++ switch (bitmap_status(dtree)) {
++ case -1:
++ kmem_cache_free(cachep_d, dtree);
++ ctree->tree[c] = NULL;
++ k--;
++ break;
++ case 1:
++ kmem_cache_free(cachep_d, dtree);
++ ctree->tree[c] = fullbitmap_d;
++ k++;
++ break;
++ }
++ } LOOP_WALK_END();
++ } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
++ } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
++
++ write_unlock_bh(&set->lock);
++
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static inline void
++init_gc_timer(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ init_timer(&map->gc);
++ map->gc.data = (unsigned long) set;
++ map->gc.function = gc;
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map;
++
++ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
++ return -EINVAL;
++ }
++
++ map = kzalloc(sizeof(*map), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
++ set->data = map;
++
++ init_gc_timer(set);
++
++ return 0;
++}
++
++static inline void __flush(struct ip_set_iptreemap *map)
++{
++ struct ip_set_iptreemap_b *btree;
++ unsigned int a;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ if (btree != fullbitmap_b)
++ free_b(btree);
++ LOOP_WALK_END();
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ while (!del_timer(&map->gc))
++ msleep(IPTREEMAP_DESTROY_SLEEP);
++
++ __flush(map);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ while (!del_timer(&map->gc))
++ msleep(IPTREEMAP_DESTROY_SLEEP);
++
++ __flush(map);
++
++ memset(map, 0, sizeof(*map));
++
++ init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++
++ header->gc_interval = map->gc_interval;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d, inrange = 0, count = 0;
++
++ LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
++ LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
++ LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
++ for (d = 0; d < 256; d++) {
++ if (test_bit(d, (void *) dtree->bitmap)) {
++ inrange = 1;
++ } else if (inrange) {
++ count++;
++ inrange = 0;
++ }
++ }
++ } LOOP_WALK_END_COUNT();
++ } LOOP_WALK_END_COUNT();
++ } LOOP_WALK_END_COUNT();
++
++ if (inrange)
++ count++;
++
++ return (count * sizeof(struct ip_set_req_iptreemap));
++}
++
++static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++{
++ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++
++ entry->start = start;
++ entry->end = end;
++
++ return sizeof(*entry);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d, inrange = 0;
++ size_t offset = 0;
++ ip_set_ip_t start = 0, end = 0, ip;
++
++ LOOP_WALK_BEGIN(map, a, btree) {
++ LOOP_WALK_BEGIN(btree, b, ctree) {
++ LOOP_WALK_BEGIN(ctree, c, dtree) {
++ for (d = 0; d < 256; d++) {
++ if (test_bit(d, (void *) dtree->bitmap)) {
++ ip = ((a << 24) | (b << 16) | (c << 8) | d);
++ if (!inrange) {
++ inrange = 1;
++ start = ip;
++ } else if (end < ip - 1) {
++ offset += add_member(data, offset, start, end);
++ start = ip;
++ }
++ end = ip;
++ } else if (inrange) {
++ offset += add_member(data, offset, start, end);
++ inrange = 0;
++ }
++ }
++ } LOOP_WALK_END();
++ } LOOP_WALK_END();
++ } LOOP_WALK_END();
++
++ if (inrange)
++ add_member(data, offset, start, end);
++}
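/*
 * Editorial note, not part of the patch: list_members() reports the set as a
 * list of [start, end] ranges by visiting the members in address order and
 * closing the current range whenever a gap shows up. The same coalescing
 * logic over a plain sorted array (the "bit cleared while in range" case of
 * the module is folded into the gap check here):
 */
#include <stdio.h>

int main(void)
{
	/* addresses found set, in increasing order, as the tree walk yields them */
	unsigned int ips[] = { 1, 2, 3, 6, 8, 9 };
	unsigned int n = sizeof(ips) / sizeof(ips[0]);
	unsigned int i, start = 0, end = 0, inrange = 0;

	for (i = 0; i < n; i++) {
		unsigned int ip = ips[i];

		if (!inrange) {
			inrange = 1;
			start = ip;
		} else if (end < ip - 1) {	/* gap: close the previous range */
			printf("[%u, %u]\n", start, end);
			start = ip;
		}
		end = ip;
	}
	if (inrange)
		printf("[%u, %u]\n", start, end);	/* prints [1,3] [6,6] [8,9] */
	return 0;
}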
++
++static struct ip_set_type ip_set_iptreemap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = create,
++ .destroy = destroy,
++ .flush = flush,
++ .reqsize = sizeof(struct ip_set_req_iptreemap),
++ .addip = addip,
++ .addip_kernel = addip_kernel,
++ .delip = delip,
++ .delip_kernel = delip_kernel,
++ .testip = testip,
++ .testip_kernel = testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iptreemap_create),
++ .list_header = list_header,
++ .list_members_size = list_members_size,
++ .list_members = list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
++MODULE_DESCRIPTION("iptreemap type of IP sets");
++
++static int __init ip_set_iptreemap_init(void)
++{
++ int ret = -ENOMEM;
++ int a;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
++ 0, 0, NULL);
++#else
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_b) {
++ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
++ goto out;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c),
++ 0, 0, NULL);
++#else
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_c) {
++ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
++ goto outb;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d),
++ 0, 0, NULL);
++#else
++ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_d) {
++ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
++ goto outc;
++ }
++
++ fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
++ if (!fullbitmap_d)
++ goto outd;
++
++ fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
++ if (!fullbitmap_c)
++ goto outbitmapd;
++
++ fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
++ if (!fullbitmap_b)
++ goto outbitmapc;
++
++ ret = ip_set_register_set_type(&ip_set_iptreemap);
++ if (0 > ret)
++ goto outbitmapb;
++
++ /* Now init our global bitmaps */
++ memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
++
++ for (a = 0; a < 256; a++)
++ fullbitmap_c->tree[a] = fullbitmap_d;
++
++ for (a = 0; a < 256; a++)
++ fullbitmap_b->tree[a] = fullbitmap_c;
++ memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
++
++ return 0;
++
++outbitmapb:
++ kmem_cache_free(cachep_b, fullbitmap_b);
++outbitmapc:
++ kmem_cache_free(cachep_c, fullbitmap_c);
++outbitmapd:
++ kmem_cache_free(cachep_d, fullbitmap_d);
++outd:
++ kmem_cache_destroy(cachep_d);
++outc:
++ kmem_cache_destroy(cachep_c);
++outb:
++ kmem_cache_destroy(cachep_b);
++out:
++
++ return ret;
++}
++
++static void __exit ip_set_iptreemap_fini(void)
++{
++ ip_set_unregister_set_type(&ip_set_iptreemap);
++ kmem_cache_free(cachep_d, fullbitmap_d);
++ kmem_cache_free(cachep_c, fullbitmap_c);
++ kmem_cache_free(cachep_b, fullbitmap_b);
++ kmem_cache_destroy(cachep_d);
++ kmem_cache_destroy(cachep_c);
++ kmem_cache_destroy(cachep_b);
++}
++
++module_init(ip_set_iptreemap_init);
++module_exit(ip_set_iptreemap_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_macipmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,375 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the macipmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/if_ether.h>
++#include <linux/vmalloc.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_macipmap.h>
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
++ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->ip < map->first_ip || req->ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = req->ip;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (test_bit(IPSET_MACIP_ISSET,
++ (void *) &table[req->ip - map->first_ip].flags)) {
++ return (memcmp(req->ethernet,
++ &table[req->ip - map->first_ip].ethernet,
++ ETH_ALEN) == 0);
++ } else {
++ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++ }
++}
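/*
 * Editorial note, not part of the patch: macipmap keeps one fixed-size entry
 * per address in [first_ip, last_ip], so membership is a bounds check plus
 * direct indexing with (ip - first_ip) and a memcmp() of the stored MAC.
 * Userspace analog with hypothetical toy types:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_macip {
	unsigned char set;
	unsigned char ethernet[6];
};

static int toy_test(const struct toy_macip *table,
		    uint32_t first_ip, uint32_t last_ip,
		    uint32_t ip, const unsigned char *mac)
{
	if (ip < first_ip || ip > last_ip)
		return -1;		/* out of range, like -ERANGE above */
	if (!table[ip - first_ip].set)
		return 0;
	return memcmp(table[ip - first_ip].ethernet, mac, 6) == 0;
}

int main(void)
{
	static struct toy_macip table[256];
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t first = 0x0a000000, ip = 0x0a000007;	/* 10.0.0.0/24, 10.0.0.7 */

	table[ip - first].set = 1;
	memcpy(table[ip - first].ethernet, mac, 6);
	printf("%d\n", toy_test(table, first, first + 255, ip, mac));
	return 0;
}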
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++ ip_set_ip_t ip;
++
++ ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return 0;
++
++ *hash_ip = ip;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (test_bit(IPSET_MACIP_ISSET,
++ (void *) &table[ip - map->first_ip].flags)) {
++ /* Is mac pointer valid?
++ * If so, compare... */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ return (skb_mac_header(skb) >= skb->head
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
++#else
++ return (skb->mac.raw >= skb->head
++ && (skb->mac.raw + ETH_HLEN) <= skb->data
++#endif
++ && (memcmp(eth_hdr(skb)->h_source,
++ &table[ip - map->first_ip].ethernet,
++ ETH_ALEN) == 0));
++ } else {
++ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++ }
++}
++
++/* returns 0 on success */
++static inline int
++__addip(struct ip_set *set,
++ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(IPSET_MACIP_ISSET,
++ (void *) &table[ip - map->first_ip].flags))
++ return -EEXIST;
++
++ *hash_ip = ip;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_macipmap *req =
++ (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++ return __addip(set, req->ip, req->ethernet, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t ip;
++
++ ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (!(skb_mac_header(skb) >= skb->head
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
++#else
++ if (!(skb->mac.raw >= skb->head
++ && (skb->mac.raw + ETH_HLEN) <= skb->data))
++#endif
++ return -EINVAL;
++
++ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
++ (void *)&table[ip - map->first_ip].flags))
++ return -EEXIST;
++
++ *hash_ip = ip;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_macipmap *req =
++ (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
++{
++ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_macipmap_create *req =
++ (struct ip_set_req_macipmap_create *) data;
++ struct ip_set_macipmap *map;
++
++ if (size != sizeof(struct ip_set_req_macipmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++ HIPQUAD(req->from), HIPQUAD(req->to));
++
++ if (req->from > req->to) {
++ DP("bad ip range");
++ return -ENOEXEC;
++ }
++
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big (max %d addresses)",
++ MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %zu bytes",
++ sizeof(struct ip_set_macipmap));
++ return -ENOMEM;
++ }
++ map->flags = req->flags;
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ newbytes = members_size(map->first_ip, map->last_ip);
++ map->members = ip_set_malloc(newbytes);
++ DP("members: %u %p", newbytes, map->members);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_req_macipmap_create *header =
++ (struct ip_set_req_macipmap_create *) data;
++
++ DP("list_header %x %x %u", map->first_ip, map->last_ip,
++ map->flags);
++
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++ header->flags = map->flags;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ DP("%zu", members_size(map->first_ip, map->last_ip));
++ return members_size(map->first_ip, map->last_ip);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ int bytes = members_size(map->first_ip, map->last_ip);
++
++ DP("members: %u %p", bytes, map->members);
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_macipmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_macipmap),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_macipmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("macipmap type of IP sets");
++
++static int __init ip_set_macipmap_init(void)
++{
++ init_max_malloc_size();
++ return ip_set_register_set_type(&ip_set_macipmap);
++}
++
++static void __exit ip_set_macipmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_macipmap);
++}
++
++module_init(ip_set_macipmap_init);
++module_exit(ip_set_macipmap_fini);
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_nethash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,497 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing a cidr nethash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_nethash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id_cidr(struct ip_set_nethash *map,
++ ip_set_ip_t ip,
++ unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = pack(ip, cidr);
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ }
++ return UINT_MAX;
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ __u32 id = UINT_MAX;
++ int i;
++
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ if (id != UINT_MAX)
++ break;
++ }
++ return id;
++}
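/*
 * Editorial note, not part of the patch: nethash stores each packed ip/cidr
 * value in one of "probes" alternative slots, each selected by hashing with
 * a different random seed, and a lookup simply re-tries every probe. Sketch
 * with a hypothetical toy mixer in place of jhash_1word():
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_HASHSIZE 64
#define TOY_PROBES   4
#define TOY_NOTFOUND 0xffffffffu	/* plays the role of UINT_MAX above */

static uint32_t toy_hash(uint32_t ip, uint32_t seed)
{
	uint32_t h = ip ^ seed;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h;
}

static uint32_t toy_lookup(const uint32_t slots[TOY_HASHSIZE],
			   const uint32_t seeds[TOY_PROBES], uint32_t ip)
{
	int i;

	for (i = 0; i < TOY_PROBES; i++) {
		uint32_t id = toy_hash(ip, seeds[i]) % TOY_HASHSIZE;

		if (slots[id] == ip)
			return id;
	}
	return TOY_NOTFOUND;
}

int main(void)
{
	uint32_t slots[TOY_HASHSIZE] = { 0 };
	uint32_t seeds[TOY_PROBES] = { 0x1234, 0xbeef, 0xcafe, 0x42 };
	uint32_t ip = 0x0a010200;	/* 10.1.2.0 packed to its cidr */

	slots[toy_hash(ip, seeds[0]) % TOY_HASHSIZE] = ip;
	printf("found at slot %u\n", (unsigned)toy_lookup(slots, seeds, ip));
	return 0;
}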
++
++static inline int
++__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
++ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
++
++ *hash_ip = pack(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++
++ return __addip_base(map, *hash_ip);
++}
++
++static void
++update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
++{
++ unsigned char next;
++ int i;
++
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ if (map->cidr[i] == cidr) {
++ return;
++ } else if (map->cidr[i] < cidr) {
++ next = map->cidr[i];
++ map->cidr[i] = cidr;
++ cidr = next;
++ }
++ }
++ if (i < 30)
++ map->cidr[i] = cidr;
++}
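/*
 * Editorial note, not part of the patch: update_cidr_sizes() keeps the per-set
 * list of known prefix lengths sorted from most to least specific by inserting
 * the new value with a running swap. Userspace equivalent:
 */
#include <stdio.h>

static void insert_cidr(unsigned char cidr, unsigned char list[30])
{
	unsigned char next;
	int i;

	for (i = 0; i < 30 && list[i]; i++) {
		if (list[i] == cidr)
			return;			/* already present */
		if (list[i] < cidr) {
			next = list[i];		/* insert here, carry the rest down */
			list[i] = cidr;
			cidr = next;
		}
	}
	if (i < 30)
		list[i] = cidr;
}

int main(void)
{
	unsigned char cidrs[30] = { 0 };
	int i;

	insert_cidr(24, cidrs);
	insert_cidr(16, cidrs);
	insert_cidr(28, cidrs);
	for (i = 0; i < 30 && cidrs[i]; i++)
		printf("%u ", cidrs[i]);	/* prints: 28 24 16 */
	printf("\n");
	return 0;
}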
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++ int ret;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ ret = __addip((struct ip_set_nethash *) set->data,
++ req->ip, req->cidr, hash_ip);
++
++ if (ret == 0)
++ update_cidr_sizes((struct ip_set_nethash *) set->data,
++ req->cidr);
++
++ return ret;
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ int ret = -ERANGE;
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (map->cidr[0])
++ ret = __addip(map, ip, map->cidr[0], hash_ip);
++
++ return ret;
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ ip_set_ip_t *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_nethash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new parameters */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_nethash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_nethash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_nethash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __addip_base(tmp, *elem);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ ip_set_ip_t id, *elem;
++
++ if (!ip)
++ return -ERANGE;
++
++ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ /* TODO: no garbage collection in map->cidr */
++ return __delip((struct ip_set_nethash *) set->data,
++ req->ip, req->cidr, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ int ret = -ERANGE;
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (map->cidr[0])
++ ret = __delip(map, ip, map->cidr[0], hash_ip);
++
++ return ret;
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_nethash_create *req =
++ (struct ip_set_req_nethash_create *) data;
++ struct ip_set_nethash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_nethash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_nethash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_nethash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ memset(map->cidr, 0, 30 * sizeof(unsigned char));
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ memset(map->cidr, 0, 30 * sizeof(unsigned char));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ struct ip_set_req_nethash_create *header =
++ (struct ip_set_req_nethash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_nethash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_nethash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_nethash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("nethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_nethash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_nethash);
++}
++
++static void __exit ip_set_nethash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_nethash);
++}
++
++module_init(ip_set_nethash_init);
++module_exit(ip_set_nethash_fini);
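
The nethash type above stores networks by packing an address with its prefix length and keeping the prefix lengths in map->cidr[] sorted from most to least specific, so hash_id() probes the longest prefixes first. The following standalone userspace sketch (not part of the patch; names and the demo values are illustrative only) mirrors the update_cidr_sizes() insertion logic to show the resulting lookup order:

    /* Standalone sketch of the cidr[] bookkeeping used by ip_set_nethash:
     * update_cidr_sizes() keeps the list of prefix lengths sorted in
     * decreasing order, so hash_id() tries the most specific networks first.
     */
    #include <stdio.h>

    #define CIDR_SLOTS 30   /* same bound as the module uses */

    static unsigned char cidr[CIDR_SLOTS];

    /* Mirror of update_cidr_sizes(): insertion sort into descending order,
     * ignoring prefix lengths that are already present. */
    static void update_cidr_sizes(unsigned char c)
    {
        int i;
        for (i = 0; i < CIDR_SLOTS && cidr[i]; i++) {
            if (cidr[i] == c)
                return;
            if (cidr[i] < c) {
                unsigned char next = cidr[i];
                cidr[i] = c;
                c = next;
            }
        }
        if (i < CIDR_SLOTS)
            cidr[i] = c;
    }

    int main(void)
    {
        int i;
        /* Add a few prefix lengths out of order, as userspace additions would. */
        update_cidr_sizes(16);
        update_cidr_sizes(24);
        update_cidr_sizes(8);
        update_cidr_sizes(24);      /* duplicate, ignored */

        printf("lookup order of prefix lengths:");
        for (i = 0; i < CIDR_SLOTS && cidr[i]; i++)
            printf(" /%u", cidr[i]);
        printf("\n");               /* prints: /24 /16 /8 */
        return 0;
    }

Note that addip_kernel()/delip_kernel() above only use cidr[0], i.e. packets added from the kernel side always go in under the most specific prefix length seen so far.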
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c
+--- linux-2.6.23/net/ipv4/netfilter/ip_set_portmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,346 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing a port set type as a bitmap */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_portmap.h>
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ struct iphdr *iph = ip_hdr(skb);
++#else
++ struct iphdr *iph = skb->nh.iph;
++#endif
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++
++static inline int
++__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_port, map->members);
++}
++
++static int
++testport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __testport(set, req->port, hash_port);
++}
++
++static int
++testport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res;
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
++ if (port == INVALID_PORT)
++ return 0;
++
++ res = __testport(set, port, hash_port);
++
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_port, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
++}
++
++static int
++addport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __addport(set, req->port, hash_port);
++}
++
++static int
++addport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __addport(set, port, hash_port);
++}
++
++static inline int
++__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++ if (!test_and_clear_bit(port - map->first_port, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
++}
++
++static int
++delport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __delport(set, req->port, hash_port);
++}
++
++static int
++delport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __delport(set, port, hash_port);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_portmap_create *req =
++ (struct ip_set_req_portmap_create *) data;
++ struct ip_set_portmap *map;
++
++ if (size != sizeof(struct ip_set_req_portmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u to %u", req->from, req->to);
++
++ if (req->from > req->to) {
++ DP("bad port range");
++ return -ENOEXEC;
++ }
++
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big (max %d ports)",
++ MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_portmap));
++ return -ENOMEM;
++ }
++ map->first_port = req->from;
++ map->last_port = req->to;
++ newbytes = bitmap_bytes(req->from, req->to);
++ map->members = kmalloc(newbytes, GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ kfree(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_req_portmap_create *header =
++ (struct ip_set_req_portmap_create *) data;
++
++ DP("list_header %u %u", map->first_port, map->last_port);
++
++ header->from = map->first_port;
++ header->to = map->last_port;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ return bitmap_bytes(map->first_port, map->last_port);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ int bytes = bitmap_bytes(map->first_port, map->last_port);
++
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_portmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_portmap),
++ .addip = &addport,
++ .addip_kernel = &addport_kernel,
++ .delip = &delport,
++ .delip_kernel = &delport_kernel,
++ .testip = &testport,
++ .testip_kernel = &testport_kernel,
++ .header_size = sizeof(struct ip_set_req_portmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
++
++static int __init ip_set_portmap_init(void)
++{
++ return ip_set_register_set_type(&ip_set_portmap);
++}
++
++static void __exit ip_set_portmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_portmap);
++}
++
++module_init(ip_set_portmap_init);
++module_exit(ip_set_portmap_fini);
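
The portmap type above is a plain bitmap: one bit per port in the configured range, with the buffer size computed by bitmap_bytes() from ip_set.h (bit count rounded up to whole 32-bit words). Below is a userspace sketch of the same sizing and indexing arithmetic; it is not part of the patch, and the kernel itself uses set_bit()/test_bit() on longs, so the in-memory bit order may differ — only the arithmetic is the point here:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Same formula as bitmap_bytes() in ip_set.h */
    static int bitmap_bytes(uint32_t a, uint32_t b)
    {
        return 4 * ((((b - a + 8) / 8) + 3) / 4);
    }

    int main(void)
    {
        uint32_t first = 1024, last = 2047;         /* example port range */
        int bytes = bitmap_bytes(first, last);
        unsigned char *members = calloc(1, bytes);  /* like create() does */
        uint32_t port = 1194;

        if (!members)
            return 1;

        /* __addport(): set the bit for (port - first_port) */
        members[(port - first) / 8] |= 1u << ((port - first) % 8);

        /* __testport(): test the same bit */
        printf("bitmap is %d bytes, port %u is %sin the set\n",
               bytes, (unsigned) port,
               (members[(port - first) / 8] >> ((port - first) % 8)) & 1
                   ? "" : "not ");
        free(members);
        return 0;
    }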
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ipt_set.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_set.c
+--- linux-2.6.23/net/ipv4/netfilter/ipt_set.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_set.c 2007-10-12 11:52:38.000000000 +0200
+@@ -0,0 +1,160 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module to match an IP set. */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static inline int
++match_set(const struct ipt_set_info *info,
++ const struct sk_buff *skb,
++ int inv)
++{
++ if (ip_set_testip_kernel(info->index, skb, info->flags))
++ inv = !inv;
++ return inv;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++ const void *matchinfo,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ int offset, unsigned int protoff, bool *hotdrop)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ int offset, unsigned int protoff, int *hotdrop)
++#else
++ int offset, int *hotdrop)
++#endif
++{
++ const struct ipt_set_info_match *info = matchinfo;
++
++ return match_set(&info->match_set,
++ skb,
++ info->match_set.flags[0] & IPSET_MATCH_INV);
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ const void *inf,
++#else
++ const struct ipt_ip *ip,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++ void *matchinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ unsigned int matchsize,
++#endif
++ unsigned int hook_mask)
++{
++ struct ipt_set_info_match *info =
++ (struct ipt_set_info_match *) matchinfo;
++ ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++ ip_set_printk("invalid matchsize %d", matchsize);
++ return 0;
++ }
++#endif
++
++ index = ip_set_get_byindex(info->match_set.index);
++
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("Cannot find set indentified by id %u to match",
++ info->match_set.index);
++ return 0; /* error */
++ }
++ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++ ip_set_printk("That's nasty!");
++ return 0; /* error */
++ }
++
++ return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ void *matchinfo, unsigned int matchsize)
++#else
++ void *matchinfo)
++#endif
++{
++ struct ipt_set_info_match *info = matchinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++ ip_set_printk("invalid matchsize %d", matchsize);
++ return;
++ }
++#endif
++ ip_set_put(info->match_set.index);
++}
++
++static struct ipt_match set_match = {
++ .name = "set",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ .family = AF_INET,
++#endif
++ .match = &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ .matchsize = sizeof(struct ipt_set_info_match),
++#endif
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set match module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_match xt_register_match
++#define ipt_unregister_match xt_unregister_match
++#endif
++
++static int __init ipt_ipset_init(void)
++{
++ return ipt_register_match(&set_match);
++}
++
++static void __exit ipt_ipset_fini(void)
++{
++ ipt_unregister_match(&set_match);
++}
++
++module_init(ipt_ipset_init);
++module_exit(ipt_ipset_fini);
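
The match logic in ipt_set.c reduces to a single inversion step in match_set(): membership in the set toggles the inverse flag taken from IPSET_MATCH_INV in flags[0]. A minimal standalone sketch of that truth table (not part of the patch):

    #include <stdio.h>

    /* Same logic as match_set() above, with the skb lookup stubbed out. */
    static int match_set(int in_set, int inv)
    {
        if (in_set)
            inv = !inv;
        return inv;
    }

    int main(void)
    {
        printf("in_set=0 inv=0 -> %d\n", match_set(0, 0)); /* 0: no match  */
        printf("in_set=1 inv=0 -> %d\n", match_set(1, 0)); /* 1: match     */
        printf("in_set=0 inv=1 -> %d\n", match_set(0, 1)); /* 1: match     */
        printf("in_set=1 inv=1 -> %d\n", match_set(1, 1)); /* 0: no match  */
        return 0;
    }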
+diff -Nru linux-2.6.23/net/ipv4/netfilter/ipt_SET.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_SET.c
+--- linux-2.6.23/net/ipv4/netfilter/ipt_SET.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_SET.c 2007-10-12 11:52:37.000000000 +0200
+@@ -0,0 +1,172 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* ipt_SET.c - netfilter target to manipulate IP sets */
++
++#include <linux/types.h>
++#include <linux/ip.h>
++#include <linux/timer.h>
++#include <linux/module.h>
++#include <linux/netfilter.h>
++#include <linux/netdevice.h>
++#include <linux/if.h>
++#include <linux/inetdevice.h>
++#include <linux/version.h>
++#include <net/protocol.h>
++#include <net/checksum.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static unsigned int
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ const void *targinfo,
++ void *userinfo)
++#else
++ const void *targinfo)
++#endif
++{
++ const struct ipt_set_info_target *info = targinfo;
++
++ if (info->add_set.index != IP_SET_INVALID_ID)
++ ip_set_addip_kernel(info->add_set.index,
++ *pskb,
++ info->add_set.flags);
++ if (info->del_set.index != IP_SET_INVALID_ID)
++ ip_set_delip_kernel(info->del_set.index,
++ *pskb,
++ info->del_set.flags);
++
++ return IPT_CONTINUE;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ const void *e,
++#else
++ const struct ipt_entry *e,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++ void *targinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ unsigned int targinfosize,
++#endif
++ unsigned int hook_mask)
++{
++ struct ipt_set_info_target *info =
++ (struct ipt_set_info_target *) targinfo;
++ ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
++ DP("bad target info size %u", targinfosize);
++ return 0;
++ }
++#endif
++
++ if (info->add_set.index != IP_SET_INVALID_ID) {
++ index = ip_set_get_byindex(info->add_set.index);
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("cannot find add_set index %u as target",
++ info->add_set.index);
++ return 0; /* error */
++ }
++ }
++
++ if (info->del_set.index != IP_SET_INVALID_ID) {
++ index = ip_set_get_byindex(info->del_set.index);
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("cannot find del_set index %u as target",
++ info->del_set.index);
++ return 0; /* error */
++ }
++ }
++ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
++ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++ ip_set_printk("That's nasty!");
++ return 0; /* error */
++ }
++
++ return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ void *targetinfo, unsigned int targetsize)
++#else
++ void *targetinfo)
++#endif
++{
++ struct ipt_set_info_target *info = targetinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
++ ip_set_printk("invalid targetsize %d", targetsize);
++ return;
++ }
++#endif
++ if (info->add_set.index != IP_SET_INVALID_ID)
++ ip_set_put(info->add_set.index);
++ if (info->del_set.index != IP_SET_INVALID_ID)
++ ip_set_put(info->del_set.index);
++}
++
++static struct ipt_target SET_target = {
++ .name = "SET",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ .family = AF_INET,
++#endif
++ .target = target,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ .targetsize = sizeof(struct ipt_set_info_target),
++#endif
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set target module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_target xt_register_target
++#define ipt_unregister_target xt_unregister_target
++#endif
++
++static int __init ipt_SET_init(void)
++{
++ return ipt_register_target(&SET_target);
++}
++
++static void __exit ipt_SET_fini(void)
++{
++ ipt_unregister_target(&SET_target);
++}
++
++module_init(ipt_SET_init);
++module_exit(ipt_SET_fini);
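
The SET target above only inspects two descriptors in its rule data, add_set and del_set; an index equal to IP_SET_INVALID_ID (65535 in ip_set.h) disables that half, and the verdict is always IPT_CONTINUE. The sketch below mirrors only that control flow with stand-in structures; the real ipt_set_info also carries the flags[] array that selects source or destination addresses:

    #include <stdio.h>
    #include <stdint.h>

    #define IP_SET_INVALID_ID 65535

    struct fake_set_info {          /* stand-in, not the kernel struct */
        uint16_t index;
    };

    struct fake_target_info {       /* stand-in for ipt_set_info_target */
        struct fake_set_info add_set;
        struct fake_set_info del_set;
    };

    static void run_target(const struct fake_target_info *info)
    {
        if (info->add_set.index != IP_SET_INVALID_ID)
            printf("would add the packet's address to set %u\n",
                   (unsigned) info->add_set.index);
        if (info->del_set.index != IP_SET_INVALID_ID)
            printf("would delete the packet's address from set %u\n",
                   (unsigned) info->del_set.index);
        /* and then the verdict is IPT_CONTINUE: the rule never drops */
    }

    int main(void)
    {
        struct fake_target_info only_add = {
            .add_set = { .index = 3 },
            .del_set = { .index = IP_SET_INVALID_ID },
        };
        run_target(&only_add);
        return 0;
    }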
+diff -Nru linux-2.6.23/net/ipv4/netfilter/Kconfig linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Kconfig
+--- linux-2.6.23/net/ipv4/netfilter/Kconfig 2007-10-09 22:31:38.000000000 +0200
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Kconfig 2007-10-12 11:52:38.000000000 +0200
+@@ -402,5 +402,122 @@
+ Allows altering the ARP packet payload: source and destination
+ hardware and network addresses.
+
++config IP_NF_SET
++ tristate "IP set support"
++ depends on INET && NETFILTER
++ help
++ This option adds IP set support to the kernel.
++ In order to define and use sets, you need the userspace utility
++ ipset(8).
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_MAX
++ int "Maximum number of IP sets"
++ default 256
++ range 2 65534
++ depends on IP_NF_SET
++ help
++ You can define here the default value of the maximum number
++ of IP sets for the kernel.
++
++ The value can be overridden by the 'max_sets' module
++ parameter of the 'ip_set' module.
++
++config IP_NF_SET_HASHSIZE
++ int "Hash size for bindings of IP sets"
++ default 1024
++ depends on IP_NF_SET
++ help
++ You can define here the default value of the hash size for
++ bindings of IP sets.
++
++ The value can be overridden by the 'hash_size' module
++ parameter of the 'ip_set' module.
++
++config IP_NF_SET_IPMAP
++ tristate "ipmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_MACIPMAP
++ tristate "macipmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the macipmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_PORTMAP
++ tristate "portmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the portmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPHASH
++ tristate "iphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_NETHASH
++ tristate "nethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the nethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTHASH
++ tristate "ipporthash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipporthash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPTREE
++ tristate "iptree set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iptree set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPTREEMAP
++ tristate "iptreemap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iptreemap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_MATCH_SET
++ tristate "set match support"
++ depends on IP_NF_SET
++ help
++ Set matching matches packets against the given IP sets.
++ You need the ipset utility to create and set up the sets.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_TARGET_SET
++ tristate "SET target support"
++ depends on IP_NF_SET
++ help
++ The SET target makes it possible to add/delete entries
++ in IP sets.
++ You need the ipset utility to create and set up the sets.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++
+ endmenu
+
+diff -Nru linux-2.6.23/net/ipv4/netfilter/Makefile linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Makefile
+--- linux-2.6.23/net/ipv4/netfilter/Makefile 2007-10-09 22:31:38.000000000 +0200
++++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Makefile 2007-10-12 11:52:38.000000000 +0200
+@@ -48,6 +48,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
++obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+
+ # targets
+@@ -62,6 +63,18 @@
+ obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
+ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
+ obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
++obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
++
++# sets
++obj-$(CONFIG_IP_NF_SET) += ip_set.o
++obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
++obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
++obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
++obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
++obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
++obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
+
+ # generic ARP tables
+ obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
+++ /dev/null
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set.h 2007-06-08 16:29:31.825808000 -0500
-@@ -0,0 +1,498 @@
-+#ifndef _IP_SET_H
-+#define _IP_SET_H
-+
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#if 0
-+#define IP_SET_DEBUG
-+#endif
-+
-+/*
-+ * A sockopt of such quality has hardly ever been seen before on the open
-+ * market! This little beauty, hardly ever used: above 64, so it's
-+ * traditionally used for firewalling, not touched (even once!) by the
-+ * 2.0, 2.2 and 2.4 kernels!
-+ *
-+ * Comes with its own certificate of authenticity, valid anywhere in the
-+ * Free world!
-+ *
-+ * Rusty, 19.4.2000
-+ */
-+#define SO_IP_SET 83
-+
-+/*
-+ * Heavily modify by Joakim Axelsson 08.03.2002
-+ * - Made it more modulebased
-+ *
-+ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
-+ * - bindings added
-+ * - in order to "deal with" backward compatibility, renamed to ipset
-+ */
-+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
-+ */
-+#define IP_SET_PROTOCOL_VERSION 2
-+
-+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
-+
-+/* Lets work with our own typedef for representing an IP address.
-+ * We hope to make the code more portable, possibly to IPv6...
-+ *
-+ * The representation works in HOST byte order, because most set types
-+ * will perform arithmetic operations and compare operations.
-+ *
-+ * For now the type is an uint32_t.
-+ *
-+ * Make sure to ONLY use the functions when translating and parsing
-+ * in order to keep the host byte order and make it more portable:
-+ * parse_ip()
-+ * parse_mask()
-+ * parse_ipandmask()
-+ * ip_tostring()
-+ * (Joakim: where are they???)
-+ */
-+
-+typedef uint32_t ip_set_ip_t;
-+
-+/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
-+ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
-+ */
-+typedef uint16_t ip_set_id_t;
-+
-+#define IP_SET_INVALID_ID 65535
-+
-+/* How deep we follow bindings */
-+#define IP_SET_MAX_BINDINGS 6
-+
-+/*
-+ * Option flags for kernel operations (ipt_set_info)
-+ */
-+#define IPSET_SRC 0x01 /* Source match/add */
-+#define IPSET_DST 0x02 /* Destination match/add */
-+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
-+
-+/*
-+ * Set features
-+ */
-+#define IPSET_TYPE_IP 0x01 /* IP address type of set */
-+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
-+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
-+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
-+
-+/* Reserved keywords */
-+#define IPSET_TOKEN_DEFAULT ":default:"
-+#define IPSET_TOKEN_ALL ":all:"
-+
-+/* SO_IP_SET operation constants, and their request struct types.
-+ *
-+ * Operation ids:
-+ * 0-99: commands with version checking
-+ * 100-199: add/del/test/bind/unbind
-+ * 200-299: list, save, restore
-+ */
-+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
-+ *
-+ * Sets are identified by name.
-+ */
-+
-+#define IP_SET_REQ_STD \
-+ unsigned op; \
-+ unsigned version; \
-+ char name[IP_SET_MAXNAMELEN]
-+
-+#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
-+struct ip_set_req_create {
-+ IP_SET_REQ_STD;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
-+struct ip_set_req_std {
-+ IP_SET_REQ_STD;
-+};
-+
-+#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
-+/* Uses ip_set_req_std */
-+
-+#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
-+/* Uses ip_set_req_create */
-+
-+#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
-+/* Uses ip_set_req_create */
-+
-+union ip_set_name_index {
-+ char name[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+};
-+
-+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
-+struct ip_set_req_get_set {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+};
-+
-+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
-+/* Uses ip_set_req_get_set */
-+
-+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
-+struct ip_set_req_version {
-+ unsigned op;
-+ unsigned version;
-+};
-+
-+/* Double shots operations:
-+ * add, del, test, bind and unbind.
-+ *
-+ * First we query the kernel to get the index and type of the target set,
-+ * then issue the command. Validity of IP is checked in kernel in order
-+ * to minimalize sockopt operations.
-+ */
-+
-+/* Get minimal set data for add/del/test/bind/unbind IP */
-+#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
-+struct ip_set_req_adt_get {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_REQ_BYINDEX \
-+ unsigned op; \
-+ ip_set_id_t index;
-+
-+struct ip_set_req_adt {
-+ IP_SET_REQ_BYINDEX;
-+};
-+
-+#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+struct ip_set_req_bind {
-+ IP_SET_REQ_BYINDEX;
-+ char binding[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
-+ * index = 0 means unbinding for all sets */
-+
-+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+
-+/* Multiple shots operations: list, save, restore.
-+ *
-+ * - check kernel version and query the max number of sets
-+ * - get the basic information on all sets
-+ * and size required for the next step
-+ * - get actual set data: header, data, bindings
-+ */
-+
-+/* Get max_sets and the index of a queried set
-+ */
-+#define IP_SET_OP_MAX_SETS 0x00000020
-+struct ip_set_req_max_sets {
-+ unsigned op;
-+ unsigned version;
-+ ip_set_id_t max_sets; /* max_sets */
-+ ip_set_id_t sets; /* real number of sets */
-+ union ip_set_name_index set; /* index of set if name used */
-+};
-+
-+/* Get the id and name of the sets plus size for next step */
-+#define IP_SET_OP_LIST_SIZE 0x00000201
-+#define IP_SET_OP_SAVE_SIZE 0x00000202
-+struct ip_set_req_setnames {
-+ unsigned op;
-+ ip_set_id_t index; /* set to list/save */
-+ size_t size; /* size to get setdata/bindings */
-+ /* followed by sets number of struct ip_set_name_list */
-+};
-+
-+struct ip_set_name_list {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ ip_set_id_t id;
-+};
-+
-+/* The actual list operation */
-+#define IP_SET_OP_LIST 0x00000203
-+struct ip_set_req_list {
-+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
-+};
-+
-+struct ip_set_list {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ u_int32_t ref;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+ size_t bindings_size; /* Set bindings data of bindings_size */
-+};
-+
-+struct ip_set_hash_list {
-+ ip_set_ip_t ip;
-+ ip_set_id_t binding;
-+};
-+
-+/* The save operation */
-+#define IP_SET_OP_SAVE 0x00000204
-+/* Uses ip_set_req_list, in the reply replaced by
-+ * sets number of struct ip_set_save plus a marker
-+ * ip_set_save followed by ip_set_hash_save structures.
-+ */
-+struct ip_set_save {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ size_t header_size; /* Set header data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+};
-+
-+/* At restoring, ip == 0 means default binding for the given set: */
-+struct ip_set_hash_save {
-+ ip_set_ip_t ip;
-+ ip_set_id_t id;
-+ ip_set_id_t binding;
-+};
-+
-+/* The restore operation */
-+#define IP_SET_OP_RESTORE 0x00000205
-+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
-+ * structures.
-+ */
-+struct ip_set_restore {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ size_t header_size; /* Create data of header_size */
-+ size_t members_size; /* Set members data of members_size */
-+};
-+
-+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
-+{
-+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
-+}
-+
-+#ifdef __KERNEL__
-+
-+#define ip_set_printk(format, args...) \
-+ do { \
-+ printk("%s: %s: ", __FILE__, __FUNCTION__); \
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+
-+#if defined(IP_SET_DEBUG)
-+#define DP(format, args...) \
-+ do { \
-+ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+#define IP_SET_ASSERT(x) \
-+ do { \
-+ if (!(x)) \
-+ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
-+ __FILE__, __LINE__, __FUNCTION__); \
-+ } while (0)
-+#else
-+#define DP(format, args...)
-+#define IP_SET_ASSERT(x)
-+#endif
-+
-+struct ip_set;
-+
-+/*
-+ * The ip_set_type definition - one per set type, e.g. "ipmap".
-+ *
-+ * Each individual set has a pointer, set->type, going to one
-+ * of these structures. Function pointers inside the structure implement
-+ * the real behaviour of the sets.
-+ *
-+ * If not mentioned differently, the implementation behind the function
-+ * pointers of a set_type, is expected to return 0 if ok, and a negative
-+ * errno (e.g. -EINVAL) on error.
-+ */
-+struct ip_set_type {
-+ struct list_head list; /* next in list of set types */
-+
-+ /* test for IP in set (kernel: iptables -m set src|dst)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* test for IP in set (userspace: ipset -T set IP)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /*
-+ * Size of the data structure passed by when
-+ * adding/deletin/testing an entry.
-+ */
-+ size_t reqsize;
-+
-+ /* Add IP into set (userspace: ipset -A set IP)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* remove IP from set (userspace: ipset -D set --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, size_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* new set creation - allocated type specific items
-+ */
-+ int (*create) (struct ip_set *set,
-+ const void *data, size_t size);
-+
-+ /* retry the operation after successfully tweaking the set
-+ */
-+ int (*retry) (struct ip_set *set);
-+
-+ /* set destruction - free type specific items
-+ * There is no return value.
-+ * Can be called only when child sets are destroyed.
-+ */
-+ void (*destroy) (struct ip_set *set);
-+
-+ /* set flushing - reset all bits in the set, or something similar.
-+ * There is no return value.
-+ */
-+ void (*flush) (struct ip_set *set);
-+
-+ /* Listing: size needed for header
-+ */
-+ size_t header_size;
-+
-+ /* Listing: Get the header
-+ *
-+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
-+ */
-+ void (*list_header) (const struct ip_set *set,
-+ void *data);
-+
-+ /* Listing: Get the size for the set members
-+ */
-+ int (*list_members_size) (const struct ip_set *set);
-+
-+ /* Listing: Get the set members
-+ *
-+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
-+ */
-+ void (*list_members) (const struct ip_set *set,
-+ void *data);
-+
-+ char typename[IP_SET_MAXNAMELEN];
-+ unsigned char features;
-+ int protocol_version;
-+
-+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-+ struct module *me;
-+};
-+
-+extern int ip_set_register_set_type(struct ip_set_type *set_type);
-+extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
-+
-+/* A generic ipset */
-+struct ip_set {
-+ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
-+ rwlock_t lock; /* lock for concurrency control */
-+ ip_set_id_t id; /* set id for swapping */
-+ ip_set_id_t binding; /* default binding for the set */
-+ atomic_t ref; /* in kernel and in hash references */
-+ struct ip_set_type *type; /* the set types */
-+ void *data; /* pooltype specific data */
-+};
-+
-+/* Structure to bind set elements to sets */
-+struct ip_set_hash {
-+ struct list_head list; /* list of clashing entries in hash */
-+ ip_set_ip_t ip; /* ip from set */
-+ ip_set_id_t id; /* set id */
-+ ip_set_id_t binding; /* set we bind the element to */
-+};
-+
-+/* register and unregister set references */
-+extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
-+extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
-+extern void ip_set_put(ip_set_id_t id);
-+
-+/* API for iptables set match, and SET target */
-+extern void ip_set_addip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern void ip_set_delip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+extern int ip_set_testip_kernel(ip_set_id_t id,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_H*/
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iphash.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,30 @@
-+#ifndef __IP_SET_IPHASH_H
-+#define __IP_SET_IPHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iphash"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_iphash {
-+ ip_set_ip_t *members; /* the iphash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t netmask; /* netmask */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_iphash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_iphash {
-+ ip_set_ip_t ip;
-+};
-+
-+#endif /* __IP_SET_IPHASH_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipmap.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,56 @@
-+#ifndef __IP_SET_IPMAP_H
-+#define __IP_SET_IPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "ipmap"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_ipmap {
-+ void *members; /* the ipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ ip_set_ip_t netmask; /* subnet netmask */
-+ ip_set_ip_t sizeid; /* size of set in IPs */
-+ ip_set_ip_t hosts; /* number of hosts in a subnet */
-+};
-+
-+struct ip_set_req_ipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_ipmap {
-+ ip_set_ip_t ip;
-+};
-+
-+unsigned int
-+mask_to_bits(ip_set_ip_t mask)
-+{
-+ unsigned int bits = 32;
-+ ip_set_ip_t maskaddr;
-+
-+ if (mask == 0xFFFFFFFF)
-+ return bits;
-+
-+ maskaddr = 0xFFFFFFFE;
-+ while (--bits >= 0 && maskaddr != mask)
-+ maskaddr <<= 1;
-+
-+ return bits;
-+}
-+
-+ip_set_ip_t
-+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
-+{
-+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
-+ *bits = 32;
-+ while (--(*bits) >= 0 && mask && (to & mask) != from)
-+ mask <<= 1;
-+
-+ return mask;
-+}
-+
-+#endif /* __IP_SET_IPMAP_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_ipporthash.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,34 @@
-+#ifndef __IP_SET_IPPORTHASH_H
-+#define __IP_SET_IPPORTHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "ipporthash"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+struct ip_set_ipporthash {
-+ ip_set_ip_t *members; /* the ipporthash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_ipporthash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_ipporthash {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_IPPORTHASH_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_iptree.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,40 @@
-+#ifndef __IP_SET_IPTREE_H
-+#define __IP_SET_IPTREE_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iptree"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_iptreed {
-+ unsigned long expires[256]; /* x.x.x.ADDR */
-+};
-+
-+struct ip_set_iptreec {
-+ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
-+};
-+
-+struct ip_set_iptreeb {
-+ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
-+};
-+
-+struct ip_set_iptree {
-+ unsigned int timeout;
-+ unsigned int gc_interval;
-+#ifdef __KERNEL__
-+ uint32_t elements; /* number of elements */
-+ struct timer_list gc;
-+ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
-+#endif
-+};
-+
-+struct ip_set_req_iptree_create {
-+ unsigned int timeout;
-+};
-+
-+struct ip_set_req_iptree {
-+ ip_set_ip_t ip;
-+ unsigned int timeout;
-+};
-+
-+#endif /* __IP_SET_IPTREE_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_jhash.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_jhash.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,148 @@
-+#ifndef _LINUX_IPSET_JHASH_H
-+#define _LINUX_IPSET_JHASH_H
-+
-+/* This is a copy of linux/jhash.h but the types u32/u8 are changed
-+ * to __u32/__u8 so that the header file can be included into
-+ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ */
-+
-+/* jhash.h: Jenkins hash support.
-+ *
-+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
-+ *
-+ * http://burtleburtle.net/bob/hash/
-+ *
-+ * These are the credits from Bob's sources:
-+ *
-+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
-+ * hash(), hash2(), hash3, and mix() are externally useful functions.
-+ * Routines to test the hash are included if SELF_TEST is defined.
-+ * You can use this free for any purpose. It has no warranty.
-+ *
-+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
-+ *
-+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are surely my fault. -DaveM
-+ */
-+
-+/* NOTE: Arguments are modified. */
-+#define __jhash_mix(a, b, c) \
-+{ \
-+ a -= b; a -= c; a ^= (c>>13); \
-+ b -= c; b -= a; b ^= (a<<8); \
-+ c -= a; c -= b; c ^= (b>>13); \
-+ a -= b; a -= c; a ^= (c>>12); \
-+ b -= c; b -= a; b ^= (a<<16); \
-+ c -= a; c -= b; c ^= (b>>5); \
-+ a -= b; a -= c; a ^= (c>>3); \
-+ b -= c; b -= a; b ^= (a<<10); \
-+ c -= a; c -= b; c ^= (b>>15); \
-+}
-+
-+/* The golden ration: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0x9e3779b9
-+
-+/* The most generic version, hashes an arbitrary sequence
-+ * of bytes. No alignment or length assumptions are made about
-+ * the input key.
-+ */
-+static inline __u32 jhash(void *key, __u32 length, __u32 initval)
-+{
-+ __u32 a, b, c, len;
-+ __u8 *k = key;
-+
-+ len = length;
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+
-+ while (len >= 12) {
-+ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
-+ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
-+ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
-+
-+ __jhash_mix(a,b,c);
-+
-+ k += 12;
-+ len -= 12;
-+ }
-+
-+ c += length;
-+ switch (len) {
-+ case 11: c += ((__u32)k[10]<<24);
-+ case 10: c += ((__u32)k[9]<<16);
-+ case 9 : c += ((__u32)k[8]<<8);
-+ case 8 : b += ((__u32)k[7]<<24);
-+ case 7 : b += ((__u32)k[6]<<16);
-+ case 6 : b += ((__u32)k[5]<<8);
-+ case 5 : b += k[4];
-+ case 4 : a += ((__u32)k[3]<<24);
-+ case 3 : a += ((__u32)k[2]<<16);
-+ case 2 : a += ((__u32)k[1]<<8);
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
-+
-+ return c;
-+}
-+
-+/* A special optimized version that handles 1 or more of __u32s.
-+ * The length parameter here is the number of __u32s in the key.
-+ */
-+static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
-+{
-+ __u32 a, b, c, len;
-+
-+ a = b = JHASH_GOLDEN_RATIO;
-+ c = initval;
-+ len = length;
-+
-+ while (len >= 3) {
-+ a += k[0];
-+ b += k[1];
-+ c += k[2];
-+ __jhash_mix(a, b, c);
-+ k += 3; len -= 3;
-+ }
-+
-+ c += length * 4;
-+
-+ switch (len) {
-+ case 2 : b += k[1];
-+ case 1 : a += k[0];
-+ };
-+
-+ __jhash_mix(a,b,c);
-+
-+ return c;
-+}
-+
-+
-+/* A special ultra-optimized versions that knows they are hashing exactly
-+ * 3, 2 or 1 word(s).
-+ *
-+ * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally
-+ * done at the end is not done here.
-+ */
-+static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
-+{
-+ a += JHASH_GOLDEN_RATIO;
-+ b += JHASH_GOLDEN_RATIO;
-+ c += initval;
-+
-+ __jhash_mix(a, b, c);
-+
-+ return c;
-+}
-+
-+static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
-+{
-+ return jhash_3words(a, b, 0, initval);
-+}
-+
-+static inline __u32 jhash_1word(__u32 a, __u32 initval)
-+{
-+ return jhash_3words(a, 0, 0, initval);
-+}
-+
-+#endif /* _LINUX_IPSET_JHASH_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_macipmap.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,38 @@
-+#ifndef __IP_SET_MACIPMAP_H
-+#define __IP_SET_MACIPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "macipmap"
-+#define MAX_RANGE 0x0000FFFF
-+
-+/* general flags */
-+#define IPSET_MACIP_MATCHUNSET 1
-+
-+/* per ip flags */
-+#define IPSET_MACIP_ISSET 1
-+
-+struct ip_set_macipmap {
-+ void *members; /* the macipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap {
-+ ip_set_ip_t ip;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+struct ip_set_macip {
-+ unsigned short flags;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+#endif /* __IP_SET_MACIPMAP_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_malloc.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,116 @@
-+#ifndef _IP_SET_MALLOC_H
-+#define _IP_SET_MALLOC_H
-+
-+#ifdef __KERNEL__
-+
-+/* Memory allocation and deallocation */
-+static size_t max_malloc_size = 0;
-+
-+static inline void init_max_malloc_size(void)
-+{
-+#define CACHE(x) max_malloc_size = x;
-+#include <linux/kmalloc_sizes.h>
-+#undef CACHE
-+}
-+
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL);
-+}
-+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ if (bytes > max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
-+}
-+
-+struct harray {
-+ size_t max_elements;
-+ void *arrays[0];
-+};
-+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, int flags)
-+{
-+ struct harray *harray;
-+ size_t max_elements, size, i, j;
-+
-+ if (!max_malloc_size)
-+ init_max_malloc_size();
-+
-+ if (typesize > max_malloc_size)
-+ return NULL;
-+
-+ max_elements = max_malloc_size/typesize;
-+ size = hashsize/max_elements;
-+ if (hashsize % max_elements)
-+ size++;
-+
-+ /* Last pointer signals end of arrays */
-+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
-+ flags);
-+
-+ if (!harray)
-+ return NULL;
-+
-+ for (i = 0; i < size - 1; i++) {
-+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, max_elements * typesize);
-+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
-+ flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
-+
-+ harray->max_elements = max_elements;
-+ harray->arrays[size] = NULL;
-+
-+ return (void *)harray;
-+
-+ undo:
-+ for (j = 0; j < i; j++) {
-+ kfree(harray->arrays[j]);
-+ }
-+ kfree(harray);
-+ return NULL;
-+}
-+
-+static inline void harray_free(void *h)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i] != NULL; i++)
-+ kfree(harray->arrays[i]);
-+ kfree(harray);
-+}
-+
-+static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i+1] != NULL; i++)
-+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
-+ (hashsize - i * harray->max_elements) * typesize);
-+}
-+
-+#define HARRAY_ELEM(h, type, which) \
-+({ \
-+ struct harray *__h = (struct harray *)(h); \
-+ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
-+ + (which)%(__h)->max_elements); \
-+})
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_MALLOC_H*/
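In this helper, ip_set_malloc()/ip_set_free() pick kmalloc or vmalloc by size, while harray_malloc() splits a large hash into kmalloc-sized chunks that are always addressed through HARRAY_ELEM; the hash set types later in the patch store their buckets this way. A minimal usage sketch under that reading (harray_example is illustrative, not part of the patch, and assumes hashsize > 5):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/netfilter_ipv4/ip_set_malloc.h>

    /* Sketch: allocate a logical array of `hashsize` u_int32_t slots,
     * write one element through HARRAY_ELEM, then release everything. */
    static int harray_example(size_t hashsize)
    {
            void *members = harray_malloc(hashsize, sizeof(u_int32_t),
                                          GFP_KERNEL);
            u_int32_t *slot;

            if (!members)
                    return -ENOMEM;

            slot = HARRAY_ELEM(members, u_int32_t *, 5);  /* element #5 */
            *slot = 1;

            harray_flush(members, hashsize, sizeof(u_int32_t)); /* zero all */
            harray_free(members);
            return 0;
    }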
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_nethash.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,55 @@
-+#ifndef __IP_SET_NETHASH_H
-+#define __IP_SET_NETHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "nethash"
-+#define MAX_RANGE 0x0000FFFF
-+
-+struct ip_set_nethash {
-+ ip_set_ip_t *members; /* the nethash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ unsigned char cidr[30]; /* CIDR sizes */
-+ void *initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_nethash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+};
-+
-+struct ip_set_req_nethash {
-+ ip_set_ip_t ip;
-+ unsigned char cidr;
-+};
-+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
-+#endif /* __IP_SET_NETHASH_H */
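By my reading of pack() above, the prefix length is folded into the first address byte not fully covered by the CIDR mask (via the shifts[] table), so the same network with different prefix lengths hashes to different values. A standalone userspace re-statement of that arithmetic for one case, for illustration only (not part of the patch):

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    /* Copy of the pack() arithmetic from ip_set_nethash.h, checked
     * against a single example value. */
    static uint32_t pack_sketch(uint32_t ip, unsigned char cidr)
    {
            static const unsigned char shifts[] =
                    {255, 253, 249, 241, 225, 193, 129, 1};
            uint32_t addr = htonl(ip & (0xFFFFFFFF << (32 - cidr)));
            unsigned char n = cidr / 8, t = cidr % 8;
            unsigned char *a = &((unsigned char *)&addr)[n];

            *a = *a / (1 << (8 - t)) + shifts[t];
            return ntohl(addr);
    }

    int main(void)
    {
            /* 192.168.1.0/24: n = 3, t = 0, so the trailing byte becomes
             * 0/256 + shifts[0] = 255, i.e. 192.168.1.255 in host order. */
            assert(pack_sketch(0xc0a80100, 24) == 0xc0a801ff);
            return 0;
    }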
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ip_set_portmap.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,25 @@
-+#ifndef __IP_SET_PORTMAP_H
-+#define __IP_SET_PORTMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "portmap"
-+#define MAX_RANGE 0x0000FFFF
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+struct ip_set_portmap {
-+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_port; /* host byte order, included in range */
-+ ip_set_ip_t last_port; /* host byte order, included in range */
-+};
-+
-+struct ip_set_req_portmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_portmap {
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_PORTMAP_H */
-diff -ruN linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h
---- linux-2.6.21.1/include/linux/netfilter_ipv4/ipt_set.h 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/include/linux/netfilter_ipv4/ipt_set.h 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,21 @@
-+#ifndef _IPT_SET_H
-+#define _IPT_SET_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+struct ipt_set_info {
-+ ip_set_id_t index;
-+ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
-+};
-+
-+/* match info */
-+struct ipt_set_info_match {
-+ struct ipt_set_info match_set;
-+};
-+
-+struct ipt_set_info_target {
-+ struct ipt_set_info add_set;
-+ struct ipt_set_info del_set;
-+};
-+
-+#endif /*_IPT_SET_H*/
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set.c 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,2001 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module for IP set management */
-+
-+#include <linux/version.h>
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+#include <linux/config.h>
-+#endif
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/kmod.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/random.h>
-+#include <linux/jhash.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <asm/semaphore.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+
-+#define ASSERT_READ_LOCK(x)
-+#define ASSERT_WRITE_LOCK(x)
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+static struct list_head set_type_list; /* all registered sets */
-+static struct ip_set **ip_set_list; /* all individual sets */
-+static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
-+static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
-+static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
-+static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
-+static struct list_head *ip_set_hash; /* hash of bindings */
-+static unsigned int ip_set_hash_random; /* random seed */
-+
-+/*
-+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and is used at all other places
-+ * (set/SET netfilter modules, binding value, etc.)
-+ *
-+ * Userspace requests are serialized by ip_set_app_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
-+ * must obey the following rules:
-+ *
-+ * - kernel requests: read and write locking mandatory
-+ * - user requests: read locking optional, write locking mandatory
-+ */
-+
-+static inline void
-+__ip_set_get(ip_set_id_t index)
-+{
-+ atomic_inc(&ip_set_list[index]->ref);
-+}
-+
-+static inline void
-+__ip_set_put(ip_set_id_t index)
-+{
-+ atomic_dec(&ip_set_list[index]->ref);
-+}
-+
-+/*
-+ * Binding routines
-+ */
-+
-+static inline struct ip_set_hash *
-+__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ struct ip_set_hash *set_hash;
-+
-+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
-+ if (set_hash->id == id && set_hash->ip == ip)
-+ return set_hash;
-+
-+ return NULL;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ ASSERT_READ_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
-+ set_hash = __ip_set_find(key, id, ip);
-+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
-+}
-+
-+static inline void
-+__set_hash_del(struct ip_set_hash *set_hash)
-+{
-+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+
-+ __ip_set_put(set_hash->binding);
-+ list_del(&set_hash->list);
-+ kfree(set_hash);
-+}
-+
-+static int
-+ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ if (set_hash != NULL)
-+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+static int
-+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+ int ret = 0;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip), ip_set_list[binding]->name);
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ if (!set_hash) {
-+ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
-+ if (!set_hash) {
-+ ret = -ENOMEM;
-+ goto unlock;
-+ }
-+ INIT_LIST_HEAD(&set_hash->list);
-+ set_hash->id = id;
-+ set_hash->ip = ip;
-+ list_add(&set_hash->list, &ip_set_hash[key]);
-+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+ DP("overwrite binding: %s",
-+ ip_set_list[set_hash->binding]->name);
-+ __ip_set_put(set_hash->binding);
-+ }
-+ set_hash->binding = binding;
-+ __ip_set_get(set_hash->binding);
-+ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
-+ key, id, ip_set_list[id]->name,
-+ HIPQUAD(ip), binding, ip_set_list[binding]->name);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+#define FOREACH_HASH_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash; \
-+ \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+#define FOREACH_HASH_RW_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash, *__n; \
-+ \
-+ ASSERT_WRITE_LOCK(&ip_set_lock); \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+/* Add, del and test set entries from kernel */
-+
-+#define follow_bindings(index, set, ip) \
-+((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
-+ || (index = (set)->binding) != IP_SET_INVALID_ID)
-+
-+int
-+ip_set_testip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
-+ read_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return res;
-+}
-+
-+void
-+ip_set_addip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ retry:
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ if (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0)
-+ goto retry;
-+}
-+
-+void
-+ip_set_delip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+}
-+
-+/* Register and deregister settype */
-+
-+static inline struct ip_set_type *
-+find_set_type(const char *name)
-+{
-+ struct ip_set_type *set_type;
-+
-+ list_for_each_entry(set_type, &set_type_list, list)
-+ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
-+ return set_type;
-+ return NULL;
-+}
-+
-+int
-+ip_set_register_set_type(struct ip_set_type *set_type)
-+{
-+ int ret = 0;
-+
-+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
-+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
-+ set_type->typename,
-+ set_type->protocol_version,
-+ IP_SET_PROTOCOL_VERSION);
-+ return -EINVAL;
-+ }
-+
-+ write_lock_bh(&ip_set_lock);
-+ if (find_set_type(set_type->typename)) {
-+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
-+ set_type->typename);
-+ ret = -EINVAL;
-+ goto unlock;
-+ }
-+ if (!try_module_get(THIS_MODULE)) {
-+ ret = -EFAULT;
-+ goto unlock;
-+ }
-+ list_add(&set_type->list, &set_type_list);
-+ DP("'%s' registered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+void
-+ip_set_unregister_set_type(struct ip_set_type *set_type)
-+{
-+ write_lock_bh(&ip_set_lock);
-+ if (!find_set_type(set_type->typename)) {
-+ ip_set_printk("'%s' not registered?",
-+ set_type->typename);
-+ goto unlock;
-+ }
-+ list_del(&set_type->list);
-+ module_put(THIS_MODULE);
-+ DP("'%s' unregistered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+
-+}
-+
-+/*
-+ * Userspace routines
-+ */
-+
-+/*
-+ * Find set by name, reference it once. The reference makes sure the
-+ * thing pointed to does not go away under our feet. Drop the reference
-+ * later, using ip_set_put().
-+ */
-+ip_set_id_t
-+ip_set_get_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ down(&ip_set_app_mutex);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
-+ __ip_set_get(i);
-+ index = i;
-+ break;
-+ }
-+ }
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * Find set by index, reference it once. The reference makes sure the
-+ * thing pointed to does not go away under our feet. Drop the reference
-+ * later, using ip_set_put().
-+ */
-+ip_set_id_t
-+ip_set_get_byindex(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+
-+ if (index >= ip_set_max)
-+ return IP_SET_INVALID_ID;
-+
-+ if (ip_set_list[index])
-+ __ip_set_get(index);
-+ else
-+ index = IP_SET_INVALID_ID;
-+
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * If the given index points to a valid set, decrement its
-+ * reference count by one. The caller shall not assume the index
-+ * to be valid after calling this function.
-+ */
-+void ip_set_put(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+ if (ip_set_list[index])
-+ __ip_set_put(index);
-+ up(&ip_set_app_mutex);
-+}
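Taken together, ip_set_get_byname(), ip_set_get_byindex() and ip_set_put() are the reference-counting contract offered to other kernel modules: resolve a set once, hold the returned index while it is in use, then drop it. A minimal sketch of that pairing with hypothetical caller names (the set/SET netfilter modules later in the patch acquire and release indexes the same way):

    /* Sketch: a module that stores a set index in its private data. */
    static int example_checkentry(const char *setname, ip_set_id_t *index)
    {
            *index = ip_set_get_byname(setname);  /* takes one reference */
            return *index != IP_SET_INVALID_ID;
    }

    static void example_destroy(ip_set_id_t index)
    {
            ip_set_put(index);                    /* drops that reference */
    }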
-+
-+/* Find a set by name or index */
-+static ip_set_id_t
-+ip_set_find_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strcmp(ip_set_list[i]->name, name) == 0) {
-+ index = i;
-+ break;
-+ }
-+ }
-+ return index;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_byindex(ip_set_id_t index)
-+{
-+ if (index >= ip_set_max || ip_set_list[index] == NULL)
-+ index = IP_SET_INVALID_ID;
-+
-+ return index;
-+}
-+
-+/*
-+ * Add, del, test, bind and unbind
-+ */
-+
-+static inline int
-+__ip_set_testip(struct ip_set *set,
-+ const void *data,
-+ size_t size,
-+ ip_set_ip_t *ip)
-+{
-+ int res;
-+
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip(set, data, size, ip);
-+ read_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+__ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ do {
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip(set, data, size, &ip);
-+ write_unlock_bh(&set->lock);
-+ } while (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+
-+ return __ip_set_addip(index,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt));
-+}
-+
-+static int
-+ip_set_delip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+ write_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+
-+ return (res > 0 ? -EEXIST : res);
-+}
-+
-+static int
-+ip_set_bindip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of a set */
-+ char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets as binding values are referenced */
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ set->binding = binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = ip_set_hash_add(set->id, ip, binding);
-+
-+ return res;
-+}
-+
-+#define FOREACH_SET_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __i; \
-+ struct ip_set *__set; \
-+ \
-+ for (__i = 0; __i < ip_set_max; __i++) { \
-+ __set = ip_set_list[__i]; \
-+ if (__set != NULL) \
-+ fn(__set , ##args); \
-+ } \
-+})
-+
-+static inline void
-+__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
-+{
-+ if (set_hash->id == id)
-+ __set_hash_del(set_hash);
-+}
-+
-+static inline void
-+__unbind_default(struct ip_set *set)
-+{
-+ if (set->binding != IP_SET_INVALID_ID) {
-+ /* Sets as binding values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ }
-+}
-+
-+static int
-+ip_set_unbindip(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set;
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ DP("");
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ DP("%u %s", index, req_bind->binding);
-+ if (index == IP_SET_INVALID_ID) {
-+ /* unbind :all: */
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_SET_DO(__unbind_default);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all bindings of all sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+ DP("unreachable reached!");
-+ return -EINVAL;
-+ }
-+
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of set */
-+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
-+
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets in hash values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all bindings */
-+
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+
-+ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
-+ if (res >= 0)
-+ res = ip_set_hash_del(set->id, ip);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testbind(ip_set_id_t index,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = (struct ip_set_req_bind *) data;
-+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
-+ /* Default binding of set */
-+ char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
-+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = (set->binding == binding) ? -EEXIST : 0;
-+
-+ return res;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = (ip_set_find_in_hash(set->id, ip) == binding)
-+ ? -EEXIST : 0;
-+
-+ return res;
-+}
-+
-+static struct ip_set_type *
-+find_set_type_rlock(const char *typename)
-+{
-+ struct ip_set_type *type;
-+
-+ read_lock_bh(&ip_set_lock);
-+ type = find_set_type(typename);
-+ if (type == NULL)
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return type;
-+}
-+
-+static int
-+find_free_id(const char *name,
-+ ip_set_id_t *index,
-+ ip_set_id_t *id)
-+{
-+ ip_set_id_t i;
-+
-+ *id = IP_SET_INVALID_ID;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL) {
-+ if (*id == IP_SET_INVALID_ID)
-+ *id = *index = i;
-+ } else if (strcmp(name, ip_set_list[i]->name) == 0)
-+ /* Name clash */
-+ return -EEXIST;
-+ }
-+ if (*id == IP_SET_INVALID_ID)
-+ /* No free slot remained */
-+ return -ERANGE;
-+ /* Check that index is usable as id (swapping) */
-+ check:
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && ip_set_list[i]->id == *id) {
-+ *id = i;
-+ goto check;
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Create a set
-+ */
-+static int
-+ip_set_create(const char *name,
-+ const char *typename,
-+ ip_set_id_t restore,
-+ const void *data,
-+ size_t size)
-+{
-+ struct ip_set *set;
-+ ip_set_id_t index = 0, id;
-+ int res = 0;
-+
-+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
-+ /*
-+ * First, and without any locks, allocate and initialize
-+ * a normal base set structure.
-+ */
-+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
-+ if (!set)
-+ return -ENOMEM;
-+ set->lock = RW_LOCK_UNLOCKED;
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ set->binding = IP_SET_INVALID_ID;
-+ atomic_set(&set->ref, 0);
-+
-+ /*
-+ * Next, take the &ip_set_lock, check that we know the type,
-+ * and take a reference on the type, to make sure it
-+ * stays available while constructing our new set.
-+ *
-+ * After referencing the type, we drop the &ip_set_lock,
-+ * and let the new set construction run without locks.
-+ */
-+ set->type = find_set_type_rlock(typename);
-+ if (set->type == NULL) {
-+ /* Try loading the module */
-+ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
-+ strcpy(modulename, "ip_set_");
-+ strcat(modulename, typename);
-+ DP("try to load %s", modulename);
-+ request_module(modulename);
-+ set->type = find_set_type_rlock(typename);
-+ }
-+ if (set->type == NULL) {
-+ ip_set_printk("no set type '%s', set '%s' not created",
-+ typename, name);
-+ res = -ENOENT;
-+ goto out;
-+ }
-+ if (!try_module_get(set->type->me)) {
-+ read_unlock_bh(&ip_set_lock);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ read_unlock_bh(&ip_set_lock);
-+
-+ /*
-+ * Without holding any locks, create private part.
-+ */
-+ res = set->type->create(set, data, size);
-+ if (res != 0)
-+ goto put_out;
-+
-+ /* BTW, res==0 here. */
-+
-+ /*
-+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
-+ * ip_set_list.
-+ */
-+ write_lock_bh(&ip_set_lock);
-+ if ((res = find_free_id(set->name, &index, &id)) != 0) {
-+ DP("no free id!");
-+ goto cleanup;
-+ }
-+
-+ /* Make sure restore gets the same index */
-+ if (restore != IP_SET_INVALID_ID && index != restore) {
-+ DP("Can't restore, sets are screwed up");
-+ res = -ERANGE;
-+ goto cleanup;
-+ }
-+
-+ /*
-+ * Finally! Add our shiny new set to the list, and be done.
-+ */
-+ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
-+ set->id = id;
-+ ip_set_list[index] = set;
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+
-+ cleanup:
-+ write_unlock_bh(&ip_set_lock);
-+ set->type->destroy(set);
-+ put_out:
-+ module_put(set->type->me);
-+ out:
-+ kfree(set);
-+ return res;
-+}
-+
-+/*
-+ * Destroy a given existing set
-+ */
-+static void
-+ip_set_destroy_set(ip_set_id_t index)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+
-+ IP_SET_ASSERT(set);
-+ DP("set: %s", set->name);
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ ip_set_list[index] = NULL;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ /* Must call it without holding any lock */
-+ set->type->destroy(set);
-+ module_put(set->type->me);
-+ kfree(set);
-+}
-+
-+/*
-+ * Destroy a set - or all sets
-+ * Sets must not be referenced/used.
-+ */
-+static int
-+ip_set_destroy(ip_set_id_t index)
-+{
-+ ip_set_id_t i;
-+
-+ /* ref modification always protected by the mutex */
-+ if (index != IP_SET_INVALID_ID) {
-+ if (atomic_read(&ip_set_list[index]->ref))
-+ return -EBUSY;
-+ ip_set_destroy_set(index);
-+ } else {
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && (atomic_read(&ip_set_list[i]->ref)))
-+ return -EBUSY;
-+ }
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ ip_set_destroy_set(i);
-+ }
-+ }
-+ return 0;
-+}
-+
-+static void
-+ip_set_flush_set(struct ip_set *set)
-+{
-+ DP("set: %s %u", set->name, set->id);
-+
-+ write_lock_bh(&set->lock);
-+ set->type->flush(set);
-+ write_unlock_bh(&set->lock);
-+}
-+
-+/*
-+ * Flush data in a set - or in all sets
-+ */
-+static int
-+ip_set_flush(ip_set_id_t index)
-+{
-+ if (index != IP_SET_INVALID_ID) {
-+ IP_SET_ASSERT(ip_set_list[index]);
-+ ip_set_flush_set(ip_set_list[index]);
-+ } else
-+ FOREACH_SET_DO(ip_set_flush_set);
-+
-+ return 0;
-+}
-+
-+/* Rename a set */
-+static int
-+ip_set_rename(ip_set_id_t index, const char *name)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_id_t i;
-+ int res = 0;
-+
-+ DP("set: %s to %s", set->name, name);
-+ write_lock_bh(&ip_set_lock);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
-+ name,
-+ IP_SET_MAXNAMELEN - 1) == 0) {
-+ res = -EEXIST;
-+ goto unlock;
-+ }
-+ }
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+}
-+
-+/*
-+ * Swap two sets so that name/index points to the other.
-+ * References are also swapped.
-+ */
-+static int
-+ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
-+{
-+ struct ip_set *from = ip_set_list[from_index];
-+ struct ip_set *to = ip_set_list[to_index];
-+ char from_name[IP_SET_MAXNAMELEN];
-+ u_int32_t from_ref;
-+
-+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change. Artificial restriction. */
-+ if (from->type->features != to->type->features)
-+ return -ENOEXEC;
-+
-+ /* No magic here: ref munging protected by the mutex */
-+ write_lock_bh(&ip_set_lock);
-+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
-+ from_ref = atomic_read(&from->ref);
-+
-+ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
-+ atomic_set(&from->ref, atomic_read(&to->ref));
-+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
-+ atomic_set(&to->ref, from_ref);
-+
-+ ip_set_list[from_index] = to;
-+ ip_set_list[to_index] = from;
-+
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+/*
-+ * List set data
-+ */
-+
-+static inline void
-+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_list);
-+}
-+
-+static inline void
-+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, size_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_save);
-+}
-+
-+static inline void
-+__set_hash_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, void *data, int *used)
-+{
-+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
-+ (struct ip_set_hash_list *)(data + *used);
-+
-+ hash_list->ip = set_hash->ip;
-+ hash_list->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_list);
-+ }
-+}
-+
-+static int ip_set_list_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_list *set_list;
-+
-+ /* Pointer to our header */
-+ set_list = (struct ip_set_list *) (data + *used);
-+
-+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_list) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_list);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_list->header_size = set->type->header_size;
-+ if (*used + set_list->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_list->index = index;
-+ set_list->binding = set->binding;
-+ set_list->ref = atomic_read(&set->ref);
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_list->header_size;
-+
-+ /* Get and ensure set specific members size */
-+ set_list->members_size = set->type->list_members_size(set);
-+ if (*used + set_list->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_list->members_size;
-+ read_unlock_bh(&set->lock);
-+
-+ /* Bindings */
-+
-+ /* Get and ensure set specific bindings size */
-+ set_list->bindings_size = 0;
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &set_list->bindings_size);
-+ if (*used + set_list->bindings_size > len)
-+ goto not_enough_mem;
-+
-+ /* Fill in set specific bindings data */
-+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+/*
-+ * Save sets
-+ */
-+static int ip_set_save_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set;
-+ struct ip_set_save *set_save;
-+
-+ /* Pointer to our header */
-+ set_save = (struct ip_set_save *) (data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_save);
-+
-+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
-+ data, data + *used);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_save->header_size = set->type->header_size;
-+ if (*used + set_save->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_save->index = index;
-+ set_save->binding = set->binding;
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_save->header_size;
-+
-+ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->header_size, data, data + *used);
-+ /* Get and ensure set specific members size */
-+ set_save->members_size = set->type->list_members_size(set);
-+ if (*used + set_save->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_save->members_size;
-+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
-+ set_save->members_size, data, data + *used);
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+static inline void
-+__set_hash_save_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id,
-+ void *data,
-+ int *used,
-+ int len,
-+ int *res)
-+{
-+ if (*res == 0
-+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
-+ (struct ip_set_hash_save *)(data + *used);
-+ /* Ensure bindings size */
-+ if (*used + sizeof(struct ip_set_hash_save) > len) {
-+ *res = -ENOMEM;
-+ return;
-+ }
-+ hash_save->id = set_hash->id;
-+ hash_save->ip = set_hash->ip;
-+ hash_save->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_save);
-+ }
-+}
-+
-+static int ip_set_save_bindings(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ int res = 0;
-+ struct ip_set_save *set_save;
-+
-+ DP("used %u, len %u", *used, len);
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ return -ENOMEM;
-+
-+ /* Marker */
-+ set_save = (struct ip_set_save *) (data + *used);
-+ set_save->index = IP_SET_INVALID_ID;
-+ set_save->header_size = 0;
-+ set_save->members_size = 0;
-+ *used += sizeof(struct ip_set_save);
-+
-+ DP("marker added used %u, len %u", *used, len);
-+ /* Fill in bindings data */
-+ if (index != IP_SET_INVALID_ID)
-+ /* Sets are identified by id in hash */
-+ index = ip_set_list[index]->id;
-+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
-+
-+ return res;
-+}
-+
-+/*
-+ * Restore sets
-+ */
-+static int ip_set_restore(void *data,
-+ int len)
-+{
-+ int res = 0;
-+ int line = 0, used = 0, members_size;
-+ struct ip_set *set;
-+ struct ip_set_hash_save *hash_save;
-+ struct ip_set_restore *set_restore;
-+ ip_set_id_t index;
-+
-+ /* Loop to restore sets */
-+ while (1) {
-+ line++;
-+
-+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
-+ /* Get and ensure header size */
-+ if (used + sizeof(struct ip_set_restore) > len)
-+ return line;
-+ set_restore = (struct ip_set_restore *) (data + used);
-+ used += sizeof(struct ip_set_restore);
-+
-+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
-+ + set_restore->members_size > len)
-+ return line;
-+
-+ /* Check marker */
-+ if (set_restore->index == IP_SET_INVALID_ID) {
-+ line--;
-+ goto bindings;
-+ }
-+
-+ /* Try to create the set */
-+ DP("restore %s %s", set_restore->name, set_restore->typename);
-+ res = ip_set_create(set_restore->name,
-+ set_restore->typename,
-+ set_restore->index,
-+ data + used,
-+ set_restore->header_size);
-+
-+ if (res != 0)
-+ return line;
-+ used += set_restore->header_size;
-+
-+ index = ip_set_find_byindex(set_restore->index);
-+ DP("index %u, restore_index %u", index, set_restore->index);
-+ if (index != set_restore->index)
-+ return line;
-+ /* Try to restore members data */
-+ set = ip_set_list[index];
-+ members_size = 0;
-+ DP("members_size %u reqsize %u",
-+ set_restore->members_size, set->type->reqsize);
-+ while (members_size + set->type->reqsize <=
-+ set_restore->members_size) {
-+ line++;
-+ DP("members: %u, line %u", members_size, line);
-+ res = __ip_set_addip(index,
-+ data + used + members_size,
-+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
-+ return line;
-+ members_size += set->type->reqsize;
-+ }
-+
-+ DP("members_size %u %u",
-+ set_restore->members_size, members_size);
-+ if (members_size != set_restore->members_size)
-+ return line++;
-+ used += set_restore->members_size;
-+ }
-+
-+ bindings:
-+ /* Loop to restore bindings */
-+ while (used < len) {
-+ line++;
-+
-+ DP("restore binding, line %u", line);
-+ /* Get and ensure size */
-+ if (used + sizeof(struct ip_set_hash_save) > len)
-+ return line;
-+ hash_save = (struct ip_set_hash_save *) (data + used);
-+ used += sizeof(struct ip_set_hash_save);
-+
-+ /* hash_save->id is used to store the index */
-+ index = ip_set_find_byindex(hash_save->id);
-+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
-+ if (index != hash_save->id)
-+ return line;
-+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
-+ DP("corrupt binding set index %u", hash_save->binding);
-+ return line;
-+ }
-+ set = ip_set_list[hash_save->id];
-+ /* Null valued IP means default binding */
-+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
-+ hash_save->ip,
-+ hash_save->binding);
-+ else {
-+ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
-+ write_lock_bh(&ip_set_lock);
-+ set->binding = hash_save->binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+ DP("default binding: %u", set->binding);
-+ }
-+ if (res != 0)
-+ return line;
-+ }
-+ if (used != len)
-+ return line;
-+
-+ return 0;
-+}
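For orientation, the payload parsed by ip_set_restore() above is a flat byte stream: one record per set, a marker record whose index is IP_SET_INVALID_ID, then binding records until the buffer is exhausted. A comment-only sketch of the layout implied by the parser (structure and field names as used above):

    /*
     * Restore stream, as consumed by ip_set_restore():
     *
     *   struct ip_set_restore    name, typename, index,
     *                            header_size, members_size
     *   <header_size bytes>      type-specific create parameters
     *   <members_size bytes>     whole multiples of type->reqsize,
     *                            each chunk fed to __ip_set_addip()
     *   ...repeated for every set...
     *   struct ip_set_restore    marker: index == IP_SET_INVALID_ID
     *   struct ip_set_hash_save  id, ip, binding
     *                            (ip == 0 restores the default binding)
     *   ...repeated until the end of the buffer...
     */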
-+
-+static int
-+ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
-+{
-+ void *data;
-+ int res = 0; /* Assume OK */
-+ unsigned *op;
-+ struct ip_set_req_adt *req_adt;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, size_t size);
-+ struct fn_table {
-+ int (*fn)(ip_set_id_t index,
-+ const void *data, size_t size);
-+ } adtfn_table[] =
-+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
-+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
-+ };
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (len <= sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >%zu, got %u)",
-+ sizeof(unsigned), len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(len);
-+ if (!data) {
-+ DP("out of mem for %u bytes", len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *)data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create
-+ = (struct ip_set_req_create *) data;
-+
-+ if (len < sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+ res = ip_set_create(req_create->name,
-+ req_create->typename,
-+ IP_SET_INVALID_ID,
-+ data + sizeof(struct ip_set_req_create),
-+ len - sizeof(struct ip_set_req_create));
-+ goto done;
-+ }
-+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy
-+ = (struct ip_set_req_std *) data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
-+ /* Destroy all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_destroy->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+
-+ res = ip_set_destroy(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush =
-+ (struct ip_set_req_std *) data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
-+ /* Flush all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_flush->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = ip_set_flush(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename
-+ = (struct ip_set_req_create *) data;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_rename->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_rename(index, req_rename->typename);
-+ goto done;
-+ }
-+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap
-+ = (struct ip_set_req_create *) data;
-+ ip_set_id_t to_index;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid SWAP data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_swap->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ to_index = ip_set_find_byname(req_swap->typename);
-+ if (to_index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_swap(index, to_index);
-+ goto done;
-+ }
-+ default:
-+ break; /* Set identified by id */
-+ }
-+
-+ /* Here we may have add/del/test/bind/unbind/test_bind operations */
-+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
-+ res = -EBADMSG;
-+ goto done;
-+ }
-+ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
-+
-+ if (len < sizeof(struct ip_set_req_adt)) {
-+ ip_set_printk("short data in adt request (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_adt), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_adt = (struct ip_set_req_adt *) data;
-+
-+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
-+ && req_adt->index == IP_SET_INVALID_ID)) {
-+ index = ip_set_find_byindex(req_adt->index);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = adtfn(index, data, len);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static int
-+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
-+{
-+ int res = 0;
-+ unsigned *op;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ void *data;
-+ int copylen = *len;
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, *len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (*len < sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >=%zu, got %d)",
-+ sizeof(unsigned), *len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(*len);
-+ if (!data) {
-+ DP("out of mem for %d bytes", *len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, *len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *) data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version =
-+ (struct ip_set_req_version *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_version)) {
-+ ip_set_printk("invalid VERSION (want %zu, got %d)",
-+ sizeof(struct ip_set_req_version),
-+ *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_version->version = IP_SET_PROTOCOL_VERSION;
-+ res = copy_to_user(user, req_version,
-+ sizeof(struct ip_set_req_version));
-+ goto done;
-+ }
-+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ req_get->set.index = index;
-+ goto copy;
-+ }
-+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get
-+ = (struct ip_set_req_get_set *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byindex(req_get->set.index);
-+ strncpy(req_get->set.name,
-+ index == IP_SET_INVALID_ID ? ""
-+ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
-+ goto copy;
-+ }
-+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get
-+ = (struct ip_set_req_adt_get *) data;
-+
-+ if (*len != sizeof(struct ip_set_req_adt_get)) {
-+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
-+ sizeof(struct ip_set_req_adt_get), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ if (index != IP_SET_INVALID_ID) {
-+ req_get->set.index = index;
-+ strncpy(req_get->typename,
-+ ip_set_list[index]->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ } else {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets
-+ = (struct ip_set_req_max_sets *) data;
-+ ip_set_id_t i;
-+
-+ if (*len != sizeof(struct ip_set_req_max_sets)) {
-+ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
-+ sizeof(struct ip_set_req_max_sets), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
-+ req_max_sets->set.index = IP_SET_INVALID_ID;
-+ } else {
-+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
-+ ip_set_find_byname(req_max_sets->set.name);
-+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ req_max_sets->max_sets = ip_set_max;
-+ req_max_sets->sets = 0;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ req_max_sets->sets++;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST_SIZE:
-+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames
-+ = (struct ip_set_req_setnames *) data;
-+ struct ip_set_name_list *name_list;
-+ struct ip_set *set;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)) {
-+ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_setnames), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_setnames->size = 0;
-+ used = sizeof(struct ip_set_req_setnames);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL)
-+ continue;
-+ name_list = (struct ip_set_name_list *)
-+ (data + used);
-+ used += sizeof(struct ip_set_name_list);
-+ if (used > copylen) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ set = ip_set_list[i];
-+ /* Fill in index, name, etc. */
-+ name_list->index = i;
-+ name_list->id = set->id;
-+ strncpy(name_list->name,
-+ set->name,
-+ IP_SET_MAXNAMELEN - 1);
-+ strncpy(name_list->typename,
-+ set->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ DP("filled %s of type %s, index %u\n",
-+ name_list->name, name_list->typename,
-+ name_list->index);
-+ if (!(req_setnames->index == IP_SET_INVALID_ID
-+ || req_setnames->index == i))
-+ continue;
-+ /* Update size */
-+ switch (*op) {
-+ case IP_SET_OP_LIST_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_list)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ case IP_SET_OP_SAVE_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_save)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ FOREACH_HASH_DO(__set_hash_bindings_size_save,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ default:
-+ break;
-+ }
-+ }
-+ if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list
-+ = (struct ip_set_req_list *) data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short LIST (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_list->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* List all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_list_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* List an individual set */
-+ res = ip_set_list_set(index, data, &used, *len);
-+ }
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save
-+ = (struct ip_set_req_list *) data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short SAVE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_save->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_save_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* Save an individual set */
-+ res = ip_set_save_set(index, data, &used, *len);
-+ }
-+ if (res == 0)
-+ res = ip_set_save_bindings(index, data, &used, *len);
-+
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore
-+ = (struct ip_set_req_setnames *) data;
-+ int line;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)
-+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
-+ req_restore->size, *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
-+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %u", line);
-+ if (line != 0) {
-+ res = -EAGAIN;
-+ req_restore->size = line;
-+ copylen = sizeof(struct ip_set_req_setnames);
-+ goto copy;
-+ }
-+ goto done;
-+ }
-+ default:
-+ res = -EBADMSG;
-+ goto done;
-+ } /* end of switch(op) */
-+
-+ copy:
-+ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
-+ && ip_set_list[index]
-+ ? ip_set_list[index]->name
-+ : ":all:", copylen);
-+ res = copy_to_user(user, data, copylen);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static struct nf_sockopt_ops so_set = {
-+ .pf = PF_INET,
-+ .set_optmin = SO_IP_SET,
-+ .set_optmax = SO_IP_SET + 1,
-+ .set = &ip_set_sockfn_set,
-+ .get_optmin = SO_IP_SET,
-+ .get_optmax = SO_IP_SET + 1,
-+ .get = &ip_set_sockfn_get,
-+ .use = 0
-+};
-+
-+static int max_sets, hash_size;
-+module_param(max_sets, int, 0600);
-+MODULE_PARM_DESC(max_sets, "maximal number of sets");
-+module_param(hash_size, int, 0600);
-+MODULE_PARM_DESC(hash_size, "hash size for bindings");
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("module implementing core IP set support");
-+
-+static int __init init(void)
-+{
-+ int res;
-+ ip_set_id_t i;
-+
-+ get_random_bytes(&ip_set_hash_random, 4);
-+ if (max_sets)
-+ ip_set_max = max_sets;
-+ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
-+ if (!ip_set_list) {
-+ printk(KERN_ERR "Unable to create ip_set_list\n");
-+ return -ENOMEM;
-+ }
-+ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
-+ if (hash_size)
-+ ip_set_bindings_hash_size = hash_size;
-+ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
-+ if (!ip_set_hash) {
-+ printk(KERN_ERR "Unable to create ip_set_hash\n");
-+ vfree(ip_set_list);
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < ip_set_bindings_hash_size; i++)
-+ INIT_LIST_HEAD(&ip_set_hash[i]);
-+
-+ INIT_LIST_HEAD(&set_type_list);
-+
-+ res = nf_register_sockopt(&so_set);
-+ if (res != 0) {
-+ ip_set_printk("SO_SET registry failed: %d", res);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ return res;
-+ }
-+ return 0;
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* There can't be any existing set or binding */
-+ nf_unregister_sockopt(&so_set);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ DP("these are the famous last words");
-+}
-+
-+EXPORT_SYMBOL(ip_set_register_set_type);
-+EXPORT_SYMBOL(ip_set_unregister_set_type);
-+
-+EXPORT_SYMBOL(ip_set_get_byname);
-+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put);
-+
-+EXPORT_SYMBOL(ip_set_addip_kernel);
-+EXPORT_SYMBOL(ip_set_delip_kernel);
-+EXPORT_SYMBOL(ip_set_testip_kernel);
-+
-+module_init(init);
-+module_exit(fini);
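For context on the getsockopt path that the handler above serves, here is a small user-space sketch (not part of the patch; every function name in it is made up) of the two-step size negotiation it implements: a *_SIZE op reports how many bytes the following LIST/SAVE op will need, and a LIST/SAVE call whose buffer does not match that size is refused with -EAGAIN so the caller can re-query and retry.

/* Hypothetical stand-ins for the LIST_SIZE / LIST branches above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char payload[] = "set members would go here";

/* Step 1: report how many bytes a subsequent LIST call will need. */
static size_t fake_list_size(void) { return sizeof(payload); }

/* Step 2: refuse mismatched buffers the same way the kernel handler
 * does (copylen != used -> -EAGAIN). */
static int fake_list(void *buf, size_t buflen)
{
	if (buflen != sizeof(payload))
		return -EAGAIN;
	memcpy(buf, payload, sizeof(payload));
	return 0;
}

int main(void)
{
	size_t need = fake_list_size();		/* size query */
	char *buf = malloc(need);

	if (!buf)
		return 1;
	if (fake_list(buf, need) != 0) {
		/* A real client would loop here: the set may have grown
		 * between the two calls, so -EAGAIN means "re-ask". */
		free(buf);
		return 1;
	}
	printf("%s\n", buf);
	free(buf);
	return 0;
}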
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iphash.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iphash.c 2007-06-08 16:29:31.829808250 -0500
-@@ -0,0 +1,413 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip hash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_iphash.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ if (!ip || map->elements > limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *hash_ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = *hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t hash_ip, *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_iphash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->netmask = map->netmask;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_iphash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip(tmp, *elem, &hash_ip);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id(set, ip, hash_ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iphash *req =
-+ (struct ip_set_req_iphash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iphash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_iphash_create *req =
-+ (struct ip_set_req_iphash_create *) data;
-+ struct ip_set_iphash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_iphash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iphash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iphash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->netmask = req->netmask;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ struct ip_set_req_iphash_create *header =
-+ (struct ip_set_req_iphash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->netmask = map->netmask;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_iphash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iphash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iphash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iphash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_iphash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iphash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
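The iphash lookup and insert paths above implement a bounded open-addressing scheme: every element has `probes` candidate slots, each derived from a differently seeded hash of the masked address, and an add that finds all candidates occupied returns -EAGAIN so the core grows and rehashes the table through the ->retry hook. A compilable user-space sketch of that idea (not part of the patch), with a toy mixing function standing in for the kernel's jhash:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define HASHSIZE 8
#define PROBES   4

static uint32_t slots[HASHSIZE];	/* 0 means "empty" */
static const uint32_t seed[PROBES] = {
	0x9e3779b9, 0x85ebca6b, 0xc2b2ae35, 0x27d4eb2f
};

static uint32_t mix(uint32_t ip, uint32_t s)	/* toy jhash_1word() */
{
	uint32_t h = ip ^ s;

	h ^= h >> 16; h *= 0x45d9f3b; h ^= h >> 16;
	return h;
}

static int add(uint32_t ip)
{
	for (int i = 0; i < PROBES; i++) {
		uint32_t id = mix(ip, seed[i]) % HASHSIZE;

		if (slots[id] == ip)
			return -EEXIST;
		if (slots[id] == 0) {
			slots[id] = ip;
			return 0;
		}
	}
	return -EAGAIN;		/* all probes busy: grow and rehash */
}

static int test(uint32_t ip)
{
	/* No early exit on an empty slot: a delete may have punched a
	 * hole in the probe sequence, exactly as the comment in
	 * hash_id() above warns. */
	for (int i = 0; i < PROBES; i++)
		if (slots[mix(ip, seed[i]) % HASHSIZE] == ip)
			return 1;
	return 0;
}

int main(void)
{
	add(0x0a000001);	/* 10.0.0.1 in host byte order */
	add(0x0a000002);
	printf("10.0.0.1 in set: %d\n", test(0x0a000001));
	printf("10.0.0.3 in set: %d\n", test(0x0a000003));
	return 0;
}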
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipmap.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipmap.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,327 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the single bitmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_ipmap.h>
-+
-+static inline ip_set_ip_t
-+ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
-+{
-+ return (ip - map->first_ip)/map->hosts;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
-+
-+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
-+ return __addip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipmap *req =
-+ (struct ip_set_req_ipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_ipmap_create *req =
-+ (struct ip_set_req_ipmap_create *) data;
-+ struct ip_set_ipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_ipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipmap));
-+ return -ENOMEM;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->netmask = req->netmask;
-+
-+ if (req->netmask == 0xFFFFFFFF) {
-+ map->hosts = 1;
-+ map->sizeid = map->last_ip - map->first_ip + 1;
-+ } else {
-+ unsigned int mask_bits, netmask_bits;
-+ ip_set_ip_t mask;
-+
-+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
-+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
-+ netmask_bits = mask_to_bits(map->netmask);
-+
-+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
-+ || netmask_bits <= mask_bits)
-+ return -ENOEXEC;
-+
-+ DP("mask_bits %u, netmask_bits %u",
-+ mask_bits, netmask_bits);
-+ map->hosts = 2 << (32 - netmask_bits - 1);
-+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
-+ }
-+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ kfree(map);
-+ return -ENOEXEC;
-+ }
-+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ newbytes = bitmap_bytes(0, map->sizeid - 1);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ struct ip_set_req_ipmap_create *header =
-+ (struct ip_set_req_ipmap_create *) data;
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+ header->netmask = map->netmask;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+ int bytes = bitmap_bytes(0, map->sizeid - 1);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_ipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
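ip_set_ipmap above stores membership as a plain bitmap: the address is masked with the set's netmask, and its offset from first_ip, divided by the number of hosts covered by one netmask block, selects the bit. A user-space sketch of that addressing (not part of the patch; the struct only mimics the fields the code uses, and range checks against first_ip/last_ip are omitted for brevity):

#include <stdint.h>
#include <stdio.h>

struct ipmap {
	uint32_t first_ip;	/* host byte order, already masked */
	uint32_t last_ip;
	uint32_t netmask;
	uint32_t hosts;		/* addresses covered by one stored bit */
	unsigned char bits[32];	/* large enough for this example */
};

static int ip_to_id(const struct ipmap *m, uint32_t ip)
{
	return (int)(((ip & m->netmask) - m->first_ip) / m->hosts);
}

static void set_ip(struct ipmap *m, uint32_t ip)
{
	int id = ip_to_id(m, ip);

	m->bits[id / 8] |= (unsigned char)(1 << (id % 8));
}

static int test_ip(const struct ipmap *m, uint32_t ip)
{
	int id = ip_to_id(m, ip);

	return (m->bits[id / 8] >> (id % 8)) & 1;
}

int main(void)
{
	/* 192.168.0.0 - 192.168.3.255 with a /24 netmask:
	 * four stored entries, one bit per /24. */
	struct ipmap m = {
		.first_ip = 0xc0a80000, .last_ip = 0xc0a803ff,
		.netmask  = 0xffffff00, .hosts   = 256,
	};

	set_ip(&m, 0xc0a80142);					/* 192.168.1.66 */
	printf("192.168.1.1 -> %d\n", test_ip(&m, 0xc0a80101));	/* same /24: 1 */
	printf("192.168.2.1 -> %d\n", test_ip(&m, 0xc0a80201));	/* other /24: 0 */
	return 0;
}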
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_ipporthash.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_ipporthash.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,535 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip+port hash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = skb->nh.iph;
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map =
-+ (struct ip_set_ipporthash *) set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = HASH_IP(map, ip, port);
-+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut at testing - there can be deleted
-+ * entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static inline int
-+__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == hash_ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = hash_ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (map->elements > limit)
-+ return -ERANGE;
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = HASH_IP(map, ip, port);
-+
-+ return __add_haship(map, *hash_ip);
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_ipporthash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new hash size */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __add_haship(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ id = hash_id(set, ip, port, hash_ip);
-+
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_ipporthash *req =
-+ (struct ip_set_req_ipporthash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, req->port, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port;
-+
-+ if (flags[index+1] == 0)
-+ return -EINVAL;
-+
-+ port = get_port(skb, flags[index+1]);
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ port,
-+ hash_ip);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_ipporthash_create *req =
-+ (struct ip_set_req_ipporthash_create *) data;
-+ struct ip_set_ipporthash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_ipporthash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_ipporthash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ struct ip_set_req_ipporthash_create *header =
-+ (struct ip_set_req_ipporthash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_ipporthash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_ipporthash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_ipporthash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_ipporthash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_ipporthash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
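ipporthash reuses the same multi-probe hash, but first folds the (address, port) pair into a single 32-bit key via HASH_IP(): the address offset from first_ip goes into the upper 16 bits and the port into the lower 16, so offsets of 64k or more would alias in the key. A tiny user-space sketch of the packing (not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_key(uint32_t first_ip, uint32_t ip, uint16_t port)
{
	return (uint32_t)port + ((ip - first_ip) << 16);	/* HASH_IP() above */
}

int main(void)
{
	uint32_t first_ip = 0x0a000000;				/* 10.0.0.0 */
	uint32_t key = hash_key(first_ip, 0x0a000007, 8080);	/* 10.0.0.7:8080 */

	printf("key       = 0x%08x\n", (unsigned)key);
	printf("ip offset = %u\n", (unsigned)(key >> 16));	/* 7 */
	printf("port      = %u\n", (unsigned)(key & 0xffff));	/* 8080 */
	return 0;
}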
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_iptree.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_iptree.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,571 @@
-+/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the iptree type */
-+
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+
-+#include <linux/netfilter_ipv4/ip_set_iptree.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* Garbage collection interval in seconds: */
-+#define IPTREE_GC_TIME 5*60
-+/* Sleep so many milliseconds before trying again
-+ * to delete the gc timer at destroying/flushing a set */
-+#define IPTREE_DESTROY_SLEEP 100
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+static struct kmem_cache *branch_cachep;
-+static struct kmem_cache *leaf_cachep;
-+#else
-+static kmem_cache_t *branch_cachep;
-+static kmem_cache_t *leaf_cachep;
-+#endif
-+
-+#define ABCD(a,b,c,d,addrp) do { \
-+ a = ((unsigned char *)addrp)[3]; \
-+ b = ((unsigned char *)addrp)[2]; \
-+ c = ((unsigned char *)addrp)[1]; \
-+ d = ((unsigned char *)addrp)[0]; \
-+} while (0)
-+
-+#define TESTIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return 0; \
-+} while (0)
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
-+ TESTIP_WALK(map, a, btree);
-+ TESTIP_WALK(btree, b, ctree);
-+ TESTIP_WALK(ctree, c, dtree);
-+ DP("%lu %lu", dtree->expires[d], jiffies);
-+ return !!(map->timeout ? (time_after(dtree->expires[d], jiffies))
-+ : dtree->expires[d]);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+ return (res < 0 ? 0 : res);
-+}
-+
-+#define ADDIP_WALK(map, elem, branch, type, cachep, flags) do { \
-+ if ((map)->tree[elem]) { \
-+ DP("found %u", elem); \
-+ branch = (map)->tree[elem]; \
-+ } else { \
-+ branch = (type *) \
-+ kmem_cache_alloc(cachep, flags); \
-+ if (branch == NULL) \
-+ return -ENOMEM; \
-+ memset(branch, 0, sizeof(*branch)); \
-+ (map)->tree[elem] = branch; \
-+ DP("alloc %u", elem); \
-+ } \
-+} while (0)
-+
-+static inline int
-+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
-+ ip_set_ip_t *hash_ip,
-+ unsigned int __nocast flags)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+ int ret = 0;
-+
-+ if (!ip || map->elements > limit)
-+ /* We could call the garbage collector
-+ * but it's probably overkill */
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
-+ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep, flags);
-+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep, flags);
-+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep, flags);
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ ret = -EEXIST;
-+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
-+ /* Lottery */
-+ if (dtree->expires[d] == 0)
-+ dtree->expires[d] = 1;
-+ DP("%u %lu", d, dtree->expires[d]);
-+ if (ret == 0)
-+ map->elements++;
-+ return ret;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
-+ return __addip(set, req->ip,
-+ req->timeout ? req->timeout : map->timeout,
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ map->timeout,
-+ hash_ip,
-+ GFP_ATOMIC);
-+}
-+
-+#define DELIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return -EEXIST; \
-+} while (0)
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DELIP_WALK(map, a, btree);
-+ DELIP_WALK(btree, b, ctree);
-+ DELIP_WALK(ctree, c, dtree);
-+
-+ if (dtree->expires[d]) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ return 0;
-+ }
-+ return -EEXIST;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_iptree *req =
-+ (struct ip_set_req_iptree *) data;
-+
-+ if (size != sizeof(struct ip_set_req_iptree)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+#define LOOP_WALK_BEGIN(map, i, branch) \
-+ for (i = 0; i < 256; i++) { \
-+ if (!(map)->tree[i]) \
-+ continue; \
-+ branch = (map)->tree[i]
-+
-+#define LOOP_WALK_END }
-+
-+static void ip_tree_gc(unsigned long ul_set)
-+{
-+ struct ip_set *set = (void *) ul_set;
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned char i,j,k;
-+
-+ i = j = k = 0;
-+ DP("gc: %s", set->name);
-+ write_lock_bh(&set->lock);
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]) {
-+ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
-+ a, b, c, d,
-+ dtree->expires[d], jiffies);
-+ if (map->timeout
-+ && time_before(dtree->expires[d], jiffies)) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ } else
-+ k = 1;
-+ }
-+ }
-+ if (k == 0) {
-+ DP("gc: %s: leaf %u %u %u empty",
-+ set->name, a, b, c);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ ctree->tree[c] = NULL;
-+ } else {
-+ DP("gc: %s: leaf %u %u %u not empty",
-+ set->name, a, b, c);
-+ j = 1;
-+ k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (j == 0) {
-+ DP("gc: %s: branch %u %u empty",
-+ set->name, a, b);
-+ kmem_cache_free(branch_cachep, ctree);
-+ btree->tree[b] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u %u not empty",
-+ set->name, a, b);
-+ i = 1;
-+ j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (i == 0) {
-+ DP("gc: %s: branch %u empty",
-+ set->name, a);
-+ kmem_cache_free(branch_cachep, btree);
-+ map->tree[a] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u not empty",
-+ set->name, a);
-+ i = j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ write_unlock_bh(&set->lock);
-+
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static inline void init_gc_timer(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ /* Even if there is no timeout for the entries,
-+ * we still have to call gc because delete
-+ * do not clean up empty branches */
-+ map->gc_interval = IPTREE_GC_TIME;
-+ init_timer(&map->gc);
-+ map->gc.data = (unsigned long) set;
-+ map->gc.function = ip_tree_gc;
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_iptree_create *req =
-+ (struct ip_set_req_iptree_create *) data;
-+ struct ip_set_iptree *map;
-+
-+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_iptree_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_iptree));
-+ return -ENOMEM;
-+ }
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = req->timeout;
-+ map->elements = 0;
-+ set->data = map;
-+
-+ init_gc_timer(set);
-+
-+ return 0;
-+}
-+
-+static void __flush(struct ip_set_iptree *map)
-+{
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, ctree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, btree);
-+ LOOP_WALK_END;
-+ map->elements = 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ kfree(map);
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ unsigned int timeout = map->timeout;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = timeout;
-+
-+ init_gc_timer(set);
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree_create *header =
-+ (struct ip_set_req_iptree_create *) data;
-+
-+ header->timeout = map->timeout;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned int count = 0;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ count++;
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+
-+ DP("members %u", count);
-+ return (count * sizeof(struct ip_set_req_iptree));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ size_t offset = 0;
-+ struct ip_set_req_iptree *entry;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = (struct ip_set_req_iptree *)(data + offset);
-+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
-+ : (dtree->expires[d] - jiffies)/HZ;
-+ offset += sizeof(struct ip_set_req_iptree);
-+ }
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+}
-+
-+static struct ip_set_type ip_set_iptree = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_iptree),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_iptree_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptree type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ int ret;
-+
-+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb),
-+ 0, 0, NULL, NULL);
-+ if (!branch_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ leaf_cachep = kmem_cache_create("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed),
-+ 0, 0, NULL, NULL);
-+ if (!leaf_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
-+ ret = -ENOMEM;
-+ goto free_branch;
-+ }
-+ ret = ip_set_register_set_type(&ip_set_iptree);
-+ if (ret == 0)
-+ goto out;
-+
-+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
-+ kmem_cache_destroy(branch_cachep);
-+ out:
-+ return ret;
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iptree);
-+ kmem_cache_destroy(leaf_cachep);
-+ kmem_cache_destroy(branch_cachep);
-+}
-+
-+module_init(init);
-+module_exit(fini);
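iptree above indexes a 256-way trie by the four octets of the address, allocating branch and leaf nodes on demand from slab caches and storing a per-address expiry stamp in the leaf (zero means absent). A user-space sketch of the same walk (not part of the patch), with calloc standing in for the slab caches and portable shifts in place of the byte-pointer ABCD() trick:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct leaf  { unsigned long expires[256]; };	/* like ip_set_iptreed */
struct ctree { struct leaf  *tree[256]; };	/* like ip_set_iptreec */
struct btree { struct ctree *tree[256]; };	/* like ip_set_iptreeb */
struct root  { struct btree *tree[256]; };	/* like ip_set_iptree */

static int add(struct root *r, uint32_t ip, unsigned long stamp)
{
	unsigned a = ip >> 24, b = (ip >> 16) & 0xff,
		 c = (ip >> 8) & 0xff, d = ip & 0xff;

	if (!r->tree[a] && !(r->tree[a] = calloc(1, sizeof(struct btree))))
		return -1;
	if (!r->tree[a]->tree[b] &&
	    !(r->tree[a]->tree[b] = calloc(1, sizeof(struct ctree))))
		return -1;
	if (!r->tree[a]->tree[b]->tree[c] &&
	    !(r->tree[a]->tree[b]->tree[c] = calloc(1, sizeof(struct leaf))))
		return -1;
	r->tree[a]->tree[b]->tree[c]->expires[d] = stamp;	/* non-zero = member */
	return 0;
}

static int test(const struct root *r, uint32_t ip)
{
	unsigned a = ip >> 24, b = (ip >> 16) & 0xff,
		 c = (ip >> 8) & 0xff, d = ip & 0xff;

	return r->tree[a] && r->tree[a]->tree[b] &&
	       r->tree[a]->tree[b]->tree[c] &&
	       r->tree[a]->tree[b]->tree[c]->expires[d] != 0;
}

int main(void)
{
	static struct root r;			/* zero-initialised */

	add(&r, 0xc0a80001, 1);			/* 192.168.0.1 */
	printf("192.168.0.1: %d\n", test(&r, 0xc0a80001));
	printf("192.168.0.2: %d\n", test(&r, 0xc0a80002));
	return 0;
}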
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_macipmap.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_macipmap.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,353 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the macipmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/if_ether.h>
-+#include <linux/vmalloc.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
-+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->ip < map->first_ip || req->ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = req->ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[req->ip - map->first_ip].flags)) {
-+ return (memcmp(req->ethernet,
-+ &table[req->ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0);
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
-+ flags[index] & IPSET_SRC ? "SRC" : "DST",
-+ NIPQUAD(skb->nh.iph->saddr),
-+ NIPQUAD(skb->nh.iph->daddr));
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return 0;
-+
-+ *hash_ip = ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags)) {
-+ /* Is mac pointer valid?
-+ * If so, compare... */
-+ return (skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data
-+ && (memcmp(eth_hdr(skb)->h_source,
-+ &table[ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0));
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
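The macipmap test paths above keep one slot per address in the range, holding a MAC plus an "is set" flag: a hit requires the stored MAC to match, and addresses with no stored MAC only match when the set carries the MATCHUNSET flag. A user-space sketch of that lookup (not part of the patch; the flag constant and struct layout below are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MATCHUNSET 1	/* stand-in for IPSET_MACIP_MATCHUNSET */

struct slot { unsigned char mac[6]; int isset; };

struct macipmap {
	uint32_t first_ip, last_ip;
	unsigned flags;
	struct slot members[16];	/* (last_ip - first_ip + 1) entries */
};

static int test(const struct macipmap *m, uint32_t ip, const unsigned char *mac)
{
	const struct slot *s;

	if (ip < m->first_ip || ip > m->last_ip)
		return 0;
	s = &m->members[ip - m->first_ip];
	if (!s->isset)
		return (m->flags & MATCHUNSET) ? 1 : 0;
	return memcmp(s->mac, mac, 6) == 0;
}

int main(void)
{
	struct macipmap m = { .first_ip = 0x0a000001, .last_ip = 0x0a000010 };
	const unsigned char good[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const unsigned char bad[6]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };

	memcpy(m.members[4].mac, good, 6);	/* bind 10.0.0.5 to `good` */
	m.members[4].isset = 1;

	printf("10.0.0.5, right MAC:  %d\n", test(&m, 0x0a000005, good));
	printf("10.0.0.5, wrong MAC:  %d\n", test(&m, 0x0a000005, bad));
	printf("10.0.0.9, no MAC set: %d\n", test(&m, 0x0a000009, good));
	return 0;
}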
-+/* returns 0 on success */
-+static inline int
-+__addip(struct ip_set *set,
-+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
-+ (void *) &table[ip - map->first_ip].flags))
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
-+ return 0;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addip(set, req->ip, req->ethernet, hash_ip);
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t ip;
-+
-+ ip = ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+
-+ if (!(skb->mac.raw >= skb->head
-+ && (skb->mac.raw + ETH_HLEN) <= skb->data))
-+ return -EINVAL;
-+
-+ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
-+}
-+
-+static inline int
-+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table =
-+ (struct ip_set_macip *) map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
-+ (void *)&table[ip - map->first_ip].flags))
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_macipmap *req =
-+ (struct ip_set_req_macipmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delip(set, req->ip, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
-+{
-+ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_macipmap_create *req =
-+ (struct ip_set_req_macipmap_create *) data;
-+ struct ip_set_macipmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_macipmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_macipmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
-+ HIPQUAD(req->from), HIPQUAD(req->to));
-+
-+ if (req->from > req->to) {
-+ DP("bad ip range");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d addresses)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_macipmap));
-+ return -ENOMEM;
-+ }
-+ map->flags = req->flags;
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ newbytes = members_size(map->first_ip, map->last_ip);
-+ map->members = ip_set_malloc(newbytes);
-+ DP("members: %u %p", newbytes, map->members);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+ struct ip_set_req_macipmap_create *header =
-+ (struct ip_set_req_macipmap_create *) data;
-+
-+ DP("list_header %x %x %u", map->first_ip, map->last_ip,
-+ map->flags);
-+
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+ header->flags = map->flags;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ DP("%u", members_size(map->first_ip, map->last_ip));
-+ return members_size(map->first_ip, map->last_ip);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_macipmap *map =
-+ (struct ip_set_macipmap *) set->data;
-+
-+ int bytes = members_size(map->first_ip, map->last_ip);
-+
-+ DP("members: %u %p", bytes, map->members);
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_macipmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_macipmap),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_macipmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("macipmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ init_max_malloc_size();
-+ return ip_set_register_set_type(&ip_set_macipmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_macipmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
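
For illustration, the macipmap lookup above reduces to a flat array indexed by (ip - first_ip), a per-entry IPSET_MACIP_ISSET flag, and a MAC comparison, with IPSET_MACIP_MATCHUNSET deciding what an unset slot means. Below is a minimal standalone sketch of that logic; the structure, the flag value and the function name are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN   6
#define MATCHUNSET 0x1                 /* stand-in for IPSET_MACIP_MATCHUNSET */

struct macip {                         /* simplified stand-in for struct ip_set_macip */
    unsigned char set;                 /* non-zero once a MAC is stored for this IP */
    unsigned char mac[ETH_ALEN];
};

static int test_macip(const struct macip *table, uint32_t first, uint32_t last,
                      unsigned flags, uint32_t ip, const unsigned char *mac)
{
    if (ip < first || ip > last)
        return 0;                      /* outside the configured range */
    if (!table[ip - first].set)
        return (flags & MATCHUNSET) ? 1 : 0;
    return memcmp(table[ip - first].mac, mac, ETH_ALEN) == 0;
}

int main(void)
{
    struct macip table[4] = {{0}};
    unsigned char mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

    table[2].set = 1;
    memcpy(table[2].mac, mac, ETH_ALEN);

    printf("%d\n", test_macip(table, 10, 13, 0, 12, mac));  /* 1: stored IP+MAC pair */
    printf("%d\n", test_macip(table, 10, 13, 0, 11, mac));  /* 0: slot unset, no MATCHUNSET */
    return 0;
}
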
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_nethash.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_nethash.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,481 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a cidr nethash set */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+#include <linux/netfilter_ipv4/ip_set_nethash.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
-+{
-+ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
-+}
-+
-+static inline __u32
-+hash_id_cidr(struct ip_set_nethash *map,
-+ ip_set_ip_t ip,
-+ unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = pack(ip, cidr);
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline __u32
-+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ __u32 id = UINT_MAX;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
-+ if (id != UINT_MAX)
-+ break;
-+ }
-+ return id;
-+}
-+
-+static inline int
-+__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
-+}
-+
-+static inline int
-+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
-+{
-+ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
-+}
-+
-+static int
-+testip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
-+ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
-+}
-+
-+static int
-+testip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr),
-+ hash_ip);
-+}
-+
-+static inline int
-+__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == ip)
-+ return -EEXIST;
-+ if (!*elem) {
-+ *elem = ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ if (!ip || map->elements > limit)
-+ return -ERANGE;
-+
-+ *hash_ip = pack(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
-+ return __addip_base(map, *hash_ip);
-+}
-+
-+static void
-+update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
-+{
-+ unsigned char next;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ if (map->cidr[i] == cidr) {
-+ return;
-+ } else if (map->cidr[i] < cidr) {
-+ next = map->cidr[i];
-+ map->cidr[i] = cidr;
-+ cidr = next;
-+ }
-+ }
-+ if (i < 30)
-+ map->cidr[i] = cidr;
-+}
-+
-+static int
-+addip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+ int ret;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+
-+ if (ret == 0)
-+ update_cidr_sizes((struct ip_set_nethash *) set->data,
-+ req->cidr);
-+
-+ return ret;
-+}
-+
-+static int
-+addip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+
-+ if (map->cidr[0])
-+ ret = __addip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
-+
-+static int retry(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t *elem;
-+ void *members;
-+ u_int32_t i, hashsize = map->hashsize;
-+ int res;
-+ struct ip_set_nethash *tmp;
-+
-+ if (map->resize == 0)
-+ return -ERANGE;
-+
-+ again:
-+ res = 0;
-+
-+ /* Calculate new parameters */
-+ hashsize += (hashsize * map->resize)/100;
-+ if (hashsize == map->hashsize)
-+ hashsize++;
-+
-+ ip_set_printk("rehashing of set %s triggered: "
-+ "hashsize grows from %u to %u",
-+ set->name, map->hashsize, hashsize);
-+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!tmp) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + map->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
-+ if (!tmp->members) {
-+ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
-+ kfree(tmp);
-+ return -ENOMEM;
-+ }
-+ tmp->hashsize = hashsize;
-+ tmp->elements = 0;
-+ tmp->probes = map->probes;
-+ tmp->resize = map->resize;
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
-+ write_lock_bh(&set->lock);
-+ map = (struct ip_set_nethash *) set->data; /* Play safe */
-+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ if (*elem)
-+ res = __addip_base(tmp, *elem);
-+ }
-+ if (res) {
-+ /* Failure, try again */
-+ write_unlock_bh(&set->lock);
-+ harray_free(tmp->members);
-+ kfree(tmp);
-+ goto again;
-+ }
-+
-+ /* Success at resizing! */
-+ members = map->members;
-+
-+ map->hashsize = tmp->hashsize;
-+ map->members = tmp->members;
-+ write_unlock_bh(&set->lock);
-+
-+ harray_free(members);
-+ kfree(tmp);
-+
-+ return 0;
-+}
-+
-+static inline int
-+__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
-+ ip_set_ip_t *hash_ip)
-+{
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = hash_id_cidr(map, ip, cidr, hash_ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+ return 0;
-+}
-+
-+static int
-+delip(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_req_nethash *req =
-+ (struct ip_set_req_nethash *) data;
-+
-+ if (size != sizeof(struct ip_set_req_nethash)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash),
-+ size);
-+ return -EINVAL;
-+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
-+ req->ip, req->cidr, hash_ip);
-+}
-+
-+static int
-+delip_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
-+ ? skb->nh.iph->saddr
-+ : skb->nh.iph->daddr);
-+
-+ if (map->cidr[0])
-+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
-+ return ret;
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ struct ip_set_req_nethash_create *req =
-+ (struct ip_set_req_nethash_create *) data;
-+ struct ip_set_nethash *map;
-+ uint16_t i;
-+
-+ if (size != sizeof(struct ip_set_req_nethash_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_nethash_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ if (req->hashsize < 1) {
-+ ip_set_printk("hashsize too small");
-+ return -ENOEXEC;
-+ }
-+ if (req->probes < 1) {
-+ ip_set_printk("probes too small");
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_nethash)
-+ + req->probes * sizeof(uint32_t));
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < req->probes; i++)
-+ get_random_bytes(((uint32_t *) map->initval)+i, 4);
-+ map->elements = 0;
-+ map->hashsize = req->hashsize;
-+ map->probes = req->probes;
-+ map->resize = req->resize;
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ harray_free(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
-+ memset(map->cidr, 0, 30 * sizeof(unsigned char));
-+ map->elements = 0;
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ struct ip_set_req_nethash_create *header =
-+ (struct ip_set_req_nethash_create *) data;
-+
-+ header->hashsize = map->hashsize;
-+ header->probes = map->probes;
-+ header->resize = map->resize;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+
-+ return (map->hashsize * sizeof(ip_set_ip_t));
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
-+ ip_set_ip_t i, *elem;
-+
-+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
-+ ((ip_set_ip_t *)data)[i] = *elem;
-+ }
-+}
-+
-+static struct ip_set_type ip_set_nethash = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_nethash),
-+ .addip = &addip,
-+ .addip_kernel = &addip_kernel,
-+ .retry = &retry,
-+ .delip = &delip,
-+ .delip_kernel = &delip_kernel,
-+ .testip = &testip,
-+ .testip_kernel = &testip_kernel,
-+ .header_size = sizeof(struct ip_set_req_nethash_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("nethash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_nethash);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_nethash);
-+}
-+
-+module_init(init);
-+module_exit(fini);
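
The nethash add path above tries a fixed number of probe slots per element, one jhash seed per probe, and returns -EAGAIN when all of them are occupied; the core then calls retry(), which grows the table by `resize` percent, reinserts every element into a temporary map and swaps it in. The following toy model shows the same strategy outside the kernel; the multiplicative hash, the names and the main() driver are invented for the sketch and stand in for jhash_1word() and the harray helpers.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct toymap {                        /* loosely mirrors struct ip_set_nethash */
    uint32_t *slots;                   /* 0 means "empty", as in the kernel harray */
    uint32_t hashsize;
    uint32_t seeds[4];                 /* one seed per probe, like map->initval[] */
    unsigned probes;
    unsigned resize;                   /* growth in percent */
};

static uint32_t toy_hash(uint32_t ip, uint32_t seed)
{
    return (ip ^ seed) * 2654435761u;  /* stand-in for jhash_1word() */
}

/* Like __addip_base(): occupy one of the probe slots, or ask for a rehash. */
static int toy_add(struct toymap *m, uint32_t ip)
{
    unsigned i;

    for (i = 0; i < m->probes; i++) {
        uint32_t id = toy_hash(ip, m->seeds[i]) % m->hashsize;

        if (m->slots[id] == ip)
            return -1;                 /* -EEXIST in the kernel */
        if (!m->slots[id]) {
            m->slots[id] = ip;
            return 0;
        }
    }
    return -2;                         /* -EAGAIN: every probe slot is taken */
}

/* Like retry(): build a larger copy, reinsert everything, then swap it in. */
static void toy_rehash(struct toymap *m)
{
    uint32_t newsize = m->hashsize;

    for (;;) {
        struct toymap tmp = *m;        /* keep the same seeds and parameters */
        uint32_t i;

        newsize += newsize * m->resize / 100 + 1;
        tmp.hashsize = newsize;
        tmp.slots = calloc(newsize, sizeof(*tmp.slots));
        for (i = 0; i < m->hashsize; i++)
            if (m->slots[i] && toy_add(&tmp, m->slots[i]) != 0)
                break;
        if (i < m->hashsize) {         /* a reinsert collided: try a bigger table */
            free(tmp.slots);
            continue;
        }
        free(m->slots);
        *m = tmp;
        return;
    }
}

int main(void)
{
    struct toymap m = { calloc(8, sizeof(uint32_t)), 8, {3, 5, 7, 11}, 4, 50 };
    uint32_t ip;

    for (ip = 1; ip <= 1000; ip++)
        while (toy_add(&m, ip) == -2)
            toy_rehash(&m);
    printf("1000 elements stored, final hashsize %u\n", m.hashsize);
    free(m.slots);
    return 0;
}

The kernel variant additionally performs the swap under write_lock_bh(&set->lock) and re-reads set->data after taking the lock, so packets being matched never see a half-built table.
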
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_portmap.c
---- linux-2.6.21.1/net/ipv4/netfilter/ip_set_portmap.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ip_set_portmap.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,334 @@
-+/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a port set type as a bitmap */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_portmap.h>
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = skb->nh.iph;
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+
-+static inline int
-+__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+
-+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_port, map->members);
-+}
-+
-+static int
-+testport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __testport(set, req->port, hash_port);
-+}
-+
-+static int
-+testport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ int res;
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
-+ if (port == INVALID_PORT)
-+ return 0;
-+
-+ res = __testport(set, port, hash_port);
-+
-+ return (res < 0 ? 0 : res);
-+}
-+
-+static inline int
-+__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+static int
-+addport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __addport(set, req->port, hash_port);
-+}
-+
-+static int
-+addport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __addport(set, port, hash_port);
-+}
-+
-+static inline int
-+__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ if (port < map->first_port || port > map->last_port)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_port, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+static int
-+delport(struct ip_set *set, const void *data, size_t size,
-+ ip_set_ip_t *hash_port)
-+{
-+ struct ip_set_req_portmap *req =
-+ (struct ip_set_req_portmap *) data;
-+
-+ if (size != sizeof(struct ip_set_req_portmap)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap),
-+ size);
-+ return -EINVAL;
-+ }
-+ return __delport(set, req->port, hash_port);
-+}
-+
-+static int
-+delport_kernel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_port,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
-+ if (port == INVALID_PORT)
-+ return -EINVAL;
-+
-+ return __delport(set, port, hash_port);
-+}
-+
-+static int create(struct ip_set *set, const void *data, size_t size)
-+{
-+ int newbytes;
-+ struct ip_set_req_portmap_create *req =
-+ (struct ip_set_req_portmap_create *) data;
-+ struct ip_set_portmap *map;
-+
-+ if (size != sizeof(struct ip_set_req_portmap_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %zu)",
-+ sizeof(struct ip_set_req_portmap_create),
-+ size);
-+ return -EINVAL;
-+ }
-+
-+ DP("from %u to %u", req->from, req->to);
-+
-+ if (req->from > req->to) {
-+ DP("bad port range");
-+ return -ENOEXEC;
-+ }
-+
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big (max %d ports)",
-+ MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %d bytes",
-+ sizeof(struct ip_set_portmap));
-+ return -ENOMEM;
-+ }
-+ map->first_port = req->from;
-+ map->last_port = req->to;
-+ newbytes = bitmap_bytes(req->from, req->to);
-+ map->members = kmalloc(newbytes, GFP_KERNEL);
-+ if (!map->members) {
-+ DP("out of memory for %d bytes", newbytes);
-+ kfree(map);
-+ return -ENOMEM;
-+ }
-+ memset(map->members, 0, newbytes);
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void destroy(struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ kfree(map->members);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void flush(struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
-+}
-+
-+static void list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ struct ip_set_req_portmap_create *header =
-+ (struct ip_set_req_portmap_create *) data;
-+
-+ DP("list_header %u %u", map->first_port, map->last_port);
-+
-+ header->from = map->first_port;
-+ header->to = map->last_port;
-+}
-+
-+static int list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+
-+ return bitmap_bytes(map->first_port, map->last_port);
-+}
-+
-+static void list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
-+ int bytes = bitmap_bytes(map->first_port, map->last_port);
-+
-+ memcpy(data, map->members, bytes);
-+}
-+
-+static struct ip_set_type ip_set_portmap = {
-+ .typename = SETTYPE_NAME,
-+ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
-+ .protocol_version = IP_SET_PROTOCOL_VERSION,
-+ .create = &create,
-+ .destroy = &destroy,
-+ .flush = &flush,
-+ .reqsize = sizeof(struct ip_set_req_portmap),
-+ .addip = &addport,
-+ .addip_kernel = &addport_kernel,
-+ .delip = &delport,
-+ .delip_kernel = &delport_kernel,
-+ .testip = &testport,
-+ .testip_kernel = &testport_kernel,
-+ .header_size = sizeof(struct ip_set_req_portmap_create),
-+ .list_header = &list_header,
-+ .list_members_size = &list_members_size,
-+ .list_members = &list_members,
-+ .me = THIS_MODULE,
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
-+
-+static int __init init(void)
-+{
-+ return ip_set_register_set_type(&ip_set_portmap);
-+}
-+
-+static void __exit fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_portmap);
-+}
-+
-+module_init(init);
-+module_exit(fini);
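
The portmap type above is a plain bitmap offset by first_port; bitmap_bytes(), defined in ip_set.h, rounds the number of bits in the range up to whole bytes and then to whole 32-bit words. The short standalone check below reuses that formula and shows the (port - first_port) addressing that __addport()/__testport() rely on; the byte-wise bit layout is a simplification for the sketch, not the kernel's set_bit() word layout.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Same formula as bitmap_bytes() in ip_set.h: bits for [a, b] rounded up to
 * bytes, then up to a multiple of 4 bytes. */
static int bitmap_bytes(uint32_t a, uint32_t b)
{
    return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

int main(void)
{
    uint32_t first_port = 1024, last_port = 2047, port = 1194;
    unsigned char members[256];
    int bytes = bitmap_bytes(first_port, last_port);
    uint32_t bit = port - first_port;

    printf("range %u-%u needs %d bytes of bitmap\n", first_port, last_port, bytes);
    memset(members, 0, bytes);

    /* add and test one port, indexed relative to first_port as in the module */
    members[bit / 8] |= 1u << (bit % 8);
    printf("port %u stored: %d\n", port,
           !!(members[bit / 8] & (1u << (bit % 8))));
    return 0;
}
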
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ipt_set.c linux-2.6.21.1.new/net/ipv4/netfilter/ipt_set.c
---- linux-2.6.21.1/net/ipv4/netfilter/ipt_set.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ipt_set.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,150 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module to match an IP set. */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static inline int
-+match_set(const struct ipt_set_info *info,
-+ const struct sk_buff *skb,
-+ int inv)
-+{
-+ if (ip_set_testip_kernel(info->index, skb, info->flags))
-+ inv = !inv;
-+ return inv;
-+}
-+
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+ const void *matchinfo,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ int offset, unsigned int protoff, int *hotdrop)
-+#else
-+ int offset, int *hotdrop)
-+#endif
-+{
-+ const struct ipt_set_info_match *info = matchinfo;
-+
-+ return match_set(&info->match_set,
-+ skb,
-+ info->match_set.flags[0] & IPSET_MATCH_INV);
-+}
-+
-+static int
-+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *inf,
-+#else
-+ const struct ipt_ip *ip,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+ void *matchinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int matchsize,
-+#endif
-+ unsigned int hook_mask)
-+{
-+ struct ipt_set_info_match *info =
-+ (struct ipt_set_info_match *) matchinfo;
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return 0;
-+ }
-+#endif
-+
-+ index = ip_set_get_byindex(info->match_set.index);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("Cannot find set indentified by id %u to match",
-+ info->match_set.index);
-+ return 0; /* error */
-+ }
-+ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_match *match,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *matchinfo, unsigned int matchsize)
-+#else
-+ void *matchinfo)
-+#endif
-+{
-+ struct ipt_set_info_match *info = matchinfo;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return;
-+ }
-+#endif
-+ ip_set_put(info->match_set.index);
-+}
-+
-+static struct ipt_match set_match = {
-+ .name = "set",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+ .family = AF_INET,
-+#endif
-+ .match = &match,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ .matchsize = sizeof(struct ipt_set_info_match),
-+#endif
-+ .checkentry = &checkentry,
-+ .destroy = &destroy,
-+ .me = THIS_MODULE
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set match module");
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_match xt_register_match
-+#define ipt_unregister_match xt_unregister_match
-+#endif
-+
-+static int __init ipt_ipset_init(void)
-+{
-+ return ipt_register_match(&set_match);
-+}
-+
-+static void __exit ipt_ipset_fini(void)
-+{
-+ ipt_unregister_match(&set_match);
-+}
-+
-+module_init(ipt_ipset_init);
-+module_exit(ipt_ipset_fini);
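
The inversion handling in match_set() above, "if the test succeeds, flip inv, then return inv", is simply an exclusive-or of "address is in the set" with the IPSET_MATCH_INV flag. A tiny standalone illustration of the resulting truth table, with invented names:

#include <stdio.h>

/* Equivalent of match_set(): the rule matches when membership XOR inversion. */
static int match_result(int in_set, int inverted)
{
    return in_set ^ inverted;
}

int main(void)
{
    int inv, in_set;

    for (inv = 0; inv <= 1; inv++)
        for (in_set = 0; in_set <= 1; in_set++)
            printf("in_set=%d inverted=%d -> match=%d\n",
                   in_set, inv, match_result(in_set, inv));
    return 0;
}
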
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/ipt_SET.c linux-2.6.21.1.new/net/ipv4/netfilter/ipt_SET.c
---- linux-2.6.21.1/net/ipv4/netfilter/ipt_SET.c 1969-12-31 18:00:00.000000000 -0600
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/ipt_SET.c 2007-06-08 16:29:31.833808500 -0500
-@@ -0,0 +1,168 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* ipt_SET.c - netfilter target to manipulate IP sets */
-+
-+#include <linux/types.h>
-+#include <linux/ip.h>
-+#include <linux/timer.h>
-+#include <linux/module.h>
-+#include <linux/netfilter.h>
-+#include <linux/netdevice.h>
-+#include <linux/if.h>
-+#include <linux/inetdevice.h>
-+#include <linux/version.h>
-+#include <net/protocol.h>
-+#include <net/checksum.h>
-+#include <linux/netfilter_ipv4.h>
-+#include <linux/netfilter_ipv4/ip_nat_rule.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static unsigned int
-+target(struct sk_buff **pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ const void *targinfo,
-+ void *userinfo)
-+#else
-+ const void *targinfo)
-+#endif
-+{
-+ const struct ipt_set_info_target *info = targinfo;
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_addip_kernel(info->add_set.index,
-+ *pskb,
-+ info->add_set.flags);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_delip_kernel(info->del_set.index,
-+ *pskb,
-+ info->del_set.flags);
-+
-+ return IPT_CONTINUE;
-+}
-+
-+static int
-+checkentry(const char *tablename,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
-+ const void *e,
-+#else
-+ const struct ipt_entry *e,
-+#endif
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+ void *targinfo,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int targinfosize,
-+#endif
-+ unsigned int hook_mask)
-+{
-+ struct ipt_set_info_target *info =
-+ (struct ipt_set_info_target *) targinfo;
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
-+ DP("bad target info size %u", targinfosize);
-+ return 0;
-+ }
-+#endif
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->add_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find add_set index %u as target",
-+ info->add_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+
-+ if (info->del_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->del_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find del_set index %u as target",
-+ info->del_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
-+ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+static void destroy(
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ const struct xt_target *target,
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ void *targetinfo, unsigned int targetsize)
-+#else
-+ void *targetinfo)
-+#endif
-+{
-+ struct ipt_set_info_target *info = targetinfo;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
-+ ip_set_printk("invalid targetsize %d", targetsize);
-+ return;
-+ }
-+#endif
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->add_set.index);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put(info->del_set.index);
-+}
-+
-+static struct ipt_target SET_target = {
-+ .name = "SET",
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+ .family = AF_INET,
-+#endif
-+ .target = target,
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
-+ .targetsize = sizeof(struct ipt_set_info_target),
-+#endif
-+ .checkentry = checkentry,
-+ .destroy = destroy,
-+ .me = THIS_MODULE
-+};
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set target module");
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
-+#define ipt_register_target xt_register_target
-+#define ipt_unregister_target xt_unregister_target
-+#endif
-+
-+static int __init ipt_SET_init(void)
-+{
-+ return ipt_register_target(&SET_target);
-+}
-+
-+static void __exit ipt_SET_fini(void)
-+{
-+ ipt_unregister_target(&SET_target);
-+}
-+
-+module_init(ipt_SET_init);
-+module_exit(ipt_SET_fini);
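
Both ipt_set.c and ipt_SET.c above select between several kernel APIs with LINUX_VERSION_CODE/KERNEL_VERSION() preprocessor checks. The comparisons work because KERNEL_VERSION() packs major, minor and patch level into one integer; the macro body below follows the classic <linux/version.h> definition and the rest is just a standalone illustration.

#include <stdio.h>

/* Classic definition from <linux/version.h> */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    printf("2.6.19 packs to 0x%06x\n", KERNEL_VERSION(2, 6, 19));
    printf("2.6.21 >= 2.6.17: %d\n",
           KERNEL_VERSION(2, 6, 21) >= KERNEL_VERSION(2, 6, 17));
    return 0;
}
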
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/Kconfig linux-2.6.21.1.new/net/ipv4/netfilter/Kconfig
---- linux-2.6.21.1/net/ipv4/netfilter/Kconfig 2007-04-27 16:49:26.000000000 -0500
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/Kconfig 2007-06-08 16:29:31.833808500 -0500
-@@ -657,5 +657,114 @@
- Allows altering the ARP packet payload: source and destination
- hardware and network addresses.
-
-+config IP_NF_SET
-+ tristate "IP set support"
-+ depends on INET && NETFILTER
-+ help
-+ This option adds IP set support to the kernel.
-+ In order to define and use sets, you need the userspace utility
-+ ipset(8).
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MAX
-+ int "Maximum number of IP sets"
-+ default 256
-+ range 2 65534
-+ depends on IP_NF_SET
-+ help
-+ Here you can define the default maximum number of
-+ IP sets for the kernel.
-+
-+ The value can be overridden by the 'max_sets' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_HASHSIZE
-+ int "Hash size for bindings of IP sets"
-+ default 1024
-+ depends on IP_NF_SET
-+ help
-+ Here you can define the default hash size for the
-+ bindings of IP sets.
-+
-+ The value can be overridden by the 'hash_size' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_IPMAP
-+ tristate "ipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MACIPMAP
-+ tristate "macipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the macipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_PORTMAP
-+ tristate "portmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the portmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPHASH
-+ tristate "iphash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iphash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_NETHASH
-+ tristate "nethash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the nethash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPPORTHASH
-+ tristate "ipporthash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipporthash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPTREE
-+ tristate "iptree set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iptree set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_MATCH_SET
-+ tristate "set match support"
-+ depends on IP_NF_SET
-+ help
-+ Set matching matches against given IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_TARGET_SET
-+ tristate "SET target support"
-+ depends on IP_NF_SET
-+ help
-+ The SET target makes it possible to add/delete entries
-+ in IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+
- endmenu
-
-diff -ruN linux-2.6.21.1/net/ipv4/netfilter/Makefile linux-2.6.21.1.new/net/ipv4/netfilter/Makefile
---- linux-2.6.21.1/net/ipv4/netfilter/Makefile 2007-04-27 16:49:26.000000000 -0500
-+++ linux-2.6.21.1.new/net/ipv4/netfilter/Makefile 2007-06-08 16:29:31.837808750 -0500
-@@ -90,6 +90,7 @@
- obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
- obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
- obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-+obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
- obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
-
- # targets
-@@ -105,6 +106,17 @@
- obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
- obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
- obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
-+obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
-+
-+# sets
-+obj-$(CONFIG_IP_NF_SET) += ip_set.o
-+obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
-+obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
-+obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
-+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
-+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
-+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
-+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
-
- # generic ARP tables
- obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
--- /dev/null
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,498 @@
++#ifndef _IP_SET_H
++#define _IP_SET_H
++
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#if 0
++#define IP_SET_DEBUG
++#endif
++
++/*
++ * A sockopt of such quality has hardly ever been seen before on the open
++ * market! This little beauty, hardly ever used: above 64, so it's
++ * traditionally used for firewalling, not touched (even once!) by the
++ * 2.0, 2.2 and 2.4 kernels!
++ *
++ * Comes with its own certificate of authenticity, valid anywhere in the
++ * Free world!
++ *
++ * Rusty, 19.4.2000
++ */
++#define SO_IP_SET 83
++
++/*
++ * Heavily modified by Joakim Axelsson 08.03.2002
++ * - Made it more module-based
++ *
++ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
++ * - bindings added
++ * - in order to "deal with" backward compatibility, renamed to ipset
++ */
++
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
++ */
++#define IP_SET_PROTOCOL_VERSION 2
++
++#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
++
++/* Lets work with our own typedef for representing an IP address.
++ * We hope to make the code more portable, possibly to IPv6...
++ *
++ * The representation works in HOST byte order, because most set types
++ * will perform arithmetic operations and compare operations.
++ *
++ * For now the type is an uint32_t.
++ *
++ * Make sure to ONLY use the functions when translating and parsing
++ * in order to keep the host byte order and make it more portable:
++ * parse_ip()
++ * parse_mask()
++ * parse_ipandmask()
++ * ip_tostring()
++ * (Joakim: where are they???)
++ */
++
++typedef uint32_t ip_set_ip_t;
++
++/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
++ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
++ */
++typedef uint16_t ip_set_id_t;
++
++#define IP_SET_INVALID_ID 65535
++
++/* How deep we follow bindings */
++#define IP_SET_MAX_BINDINGS 6
++
++/*
++ * Option flags for kernel operations (ipt_set_info)
++ */
++#define IPSET_SRC 0x01 /* Source match/add */
++#define IPSET_DST 0x02 /* Destination match/add */
++#define IPSET_MATCH_INV 0x04 /* Inverse matching */
++
++/*
++ * Set features
++ */
++#define IPSET_TYPE_IP 0x01 /* IP address type of set */
++#define IPSET_TYPE_PORT 0x02 /* Port type of set */
++#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
++#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
++
++/* Reserved keywords */
++#define IPSET_TOKEN_DEFAULT ":default:"
++#define IPSET_TOKEN_ALL ":all:"
++
++/* SO_IP_SET operation constants, and their request struct types.
++ *
++ * Operation ids:
++ * 0-99: commands with version checking
++ * 100-199: add/del/test/bind/unbind
++ * 200-299: list, save, restore
++ */
++
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
++ *
++ * Sets are identified by name.
++ */
++
++#define IP_SET_REQ_STD \
++ unsigned op; \
++ unsigned version; \
++ char name[IP_SET_MAXNAMELEN]
++
++#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
++struct ip_set_req_create {
++ IP_SET_REQ_STD;
++ char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
++struct ip_set_req_std {
++ IP_SET_REQ_STD;
++};
++
++#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
++/* Uses ip_set_req_std */
++
++#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
++/* Uses ip_set_req_create */
++
++#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
++/* Uses ip_set_req_create */
++
++union ip_set_name_index {
++ char name[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++};
++
++#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
++struct ip_set_req_get_set {
++ unsigned op;
++ unsigned version;
++ union ip_set_name_index set;
++};
++
++#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
++/* Uses ip_set_req_get_set */
++
++#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
++struct ip_set_req_version {
++ unsigned op;
++ unsigned version;
++};
++
++/* Double-shot operations:
++ * add, del, test, bind and unbind.
++ *
++ * First we query the kernel to get the index and type of the target set,
++ * then issue the command. The validity of the IP is checked in the kernel
++ * in order to minimize sockopt operations.
++ */
++
++/* Get minimal set data for add/del/test/bind/unbind IP */
++#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
++struct ip_set_req_adt_get {
++ unsigned op;
++ unsigned version;
++ union ip_set_name_index set;
++ char typename[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_REQ_BYINDEX \
++ unsigned op; \
++ ip_set_id_t index;
++
++struct ip_set_req_adt {
++ IP_SET_REQ_BYINDEX;
++};
++
++#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
++/* Uses ip_set_req_adt, with type specific addage */
++
++#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++struct ip_set_req_bind {
++ IP_SET_REQ_BYINDEX;
++ char binding[IP_SET_MAXNAMELEN];
++};
++
++#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
++/* Uses ip_set_req_bind, with type specific addage
++ * index = 0 means unbinding for all sets */
++
++#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
++/* Uses ip_set_req_bind, with type specific addage */
++
++/* Multiple shots operations: list, save, restore.
++ *
++ * - check kernel version and query the max number of sets
++ * - get the basic information on all sets
++ * and size required for the next step
++ * - get actual set data: header, data, bindings
++ */
++
++/* Get max_sets and the index of a queried set
++ */
++#define IP_SET_OP_MAX_SETS 0x00000020
++struct ip_set_req_max_sets {
++ unsigned op;
++ unsigned version;
++ ip_set_id_t max_sets; /* max_sets */
++ ip_set_id_t sets; /* real number of sets */
++ union ip_set_name_index set; /* index of set if name used */
++};
++
++/* Get the id and name of the sets plus size for next step */
++#define IP_SET_OP_LIST_SIZE 0x00000201
++#define IP_SET_OP_SAVE_SIZE 0x00000202
++struct ip_set_req_setnames {
++ unsigned op;
++ ip_set_id_t index; /* set to list/save */
++ size_t size; /* size to get setdata/bindings */
++ /* followed by sets number of struct ip_set_name_list */
++};
++
++struct ip_set_name_list {
++ char name[IP_SET_MAXNAMELEN];
++ char typename[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++ ip_set_id_t id;
++};
++
++/* The actual list operation */
++#define IP_SET_OP_LIST 0x00000203
++struct ip_set_req_list {
++ IP_SET_REQ_BYINDEX;
++ /* sets number of struct ip_set_list in reply */
++};
++
++struct ip_set_list {
++ ip_set_id_t index;
++ ip_set_id_t binding;
++ u_int32_t ref;
++ size_t header_size; /* Set header data of header_size */
++ size_t members_size; /* Set members data of members_size */
++ size_t bindings_size; /* Set bindings data of bindings_size */
++};
++
++struct ip_set_hash_list {
++ ip_set_ip_t ip;
++ ip_set_id_t binding;
++};
++
++/* The save operation */
++#define IP_SET_OP_SAVE 0x00000204
++/* Uses ip_set_req_list, in the reply replaced by
++ * sets number of struct ip_set_save plus a marker
++ * ip_set_save followed by ip_set_hash_save structures.
++ */
++struct ip_set_save {
++ ip_set_id_t index;
++ ip_set_id_t binding;
++ size_t header_size; /* Set header data of header_size */
++ size_t members_size; /* Set members data of members_size */
++};
++
++/* At restoring, ip == 0 means default binding for the given set: */
++struct ip_set_hash_save {
++ ip_set_ip_t ip;
++ ip_set_id_t id;
++ ip_set_id_t binding;
++};
++
++/* The restore operation */
++#define IP_SET_OP_RESTORE 0x00000205
++/* Uses ip_set_req_setnames followed by ip_set_restore structures
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * structures.
++ */
++struct ip_set_restore {
++ char name[IP_SET_MAXNAMELEN];
++ char typename[IP_SET_MAXNAMELEN];
++ ip_set_id_t index;
++ size_t header_size; /* Create data of header_size */
++ size_t members_size; /* Set members data of members_size */
++};
++
++static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
++{
++ return 4 * ((((b - a + 8) / 8) + 3) / 4);
++}
++
++#ifdef __KERNEL__
++
++#define ip_set_printk(format, args...) \
++ do { \
++ printk("%s: %s: ", __FILE__, __FUNCTION__); \
++ printk(format "\n" , ## args); \
++ } while (0)
++
++#if defined(IP_SET_DEBUG)
++#define DP(format, args...) \
++ do { \
++ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
++ printk(format "\n" , ## args); \
++ } while (0)
++#define IP_SET_ASSERT(x) \
++ do { \
++ if (!(x)) \
++ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
++ __FILE__, __LINE__, __FUNCTION__); \
++ } while (0)
++#else
++#define DP(format, args...)
++#define IP_SET_ASSERT(x)
++#endif
++
++struct ip_set;
++
++/*
++ * The ip_set_type definition - one per set type, e.g. "ipmap".
++ *
++ * Each individual set has a pointer, set->type, going to one
++ * of these structures. Function pointers inside the structure implement
++ * the real behaviour of the sets.
++ *
++ * If not mentioned differently, the implementation behind the function
++ * pointers of a set_type, is expected to return 0 if ok, and a negative
++ * errno (e.g. -EINVAL) on error.
++ */
++struct ip_set_type {
++ struct list_head list; /* next in list of set types */
++
++ /* test for IP in set (kernel: iptables -m set src|dst)
++ * return 0 if not in set, 1 if in set.
++ */
++ int (*testip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* test for IP in set (userspace: ipset -T set IP)
++ * return 0 if not in set, 1 if in set.
++ */
++ int (*testip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /*
++ * Size of the data structure passed in when
++ * adding/deleting/testing an entry.
++ */
++ size_t reqsize;
++
++ /* Add IP into set (userspace: ipset -A set IP)
++ * Return -EEXIST if the address is already in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address was not already in the set, 0 is returned.
++ */
++ int (*addip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
++ * Return -EEXIST if the address is already in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address was not already in the set, 0 is returned.
++ */
++ int (*addip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* remove IP from set (userspace: ipset -D set --entry x)
++ * Return -EEXIST if the address is NOT in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address really was in the set, 0 is returned.
++ */
++ int (*delip) (struct ip_set *set,
++ const void *data, size_t size,
++ ip_set_ip_t *ip);
++
++ /* remove IP from set (kernel: iptables ... -j SET --entry x)
++ * Return -EEXIST if the address is NOT in the set,
++ * and -ERANGE if the address lies outside the set bounds.
++ * If the address really was in the set, 0 is returned.
++ */
++ int (*delip_kernel) (struct ip_set *set,
++ const struct sk_buff * skb,
++ ip_set_ip_t *ip,
++ const u_int32_t *flags,
++ unsigned char index);
++
++ /* new set creation - allocate type specific items
++ */
++ int (*create) (struct ip_set *set,
++ const void *data, size_t size);
++
++ /* retry the operation after successfully tweaking the set
++ */
++ int (*retry) (struct ip_set *set);
++
++ /* set destruction - free type specific items
++ * There is no return value.
++ * Can be called only when child sets are destroyed.
++ */
++ void (*destroy) (struct ip_set *set);
++
++ /* set flushing - reset all bits in the set, or something similar.
++ * There is no return value.
++ */
++ void (*flush) (struct ip_set *set);
++
++ /* Listing: size needed for header
++ */
++ size_t header_size;
++
++ /* Listing: Get the header
++ *
++ * Fill in the information in "data".
++ * This function is always run after list_header_size() under a
++ * writelock on the set. Therefore the length of "data" is
++ * always correct.
++ */
++ void (*list_header) (const struct ip_set *set,
++ void *data);
++
++ /* Listing: Get the size for the set members
++ */
++ int (*list_members_size) (const struct ip_set *set);
++
++ /* Listing: Get the set members
++ *
++ * Fill in the information in "data".
++ * This function is always run after list_member_size() under a
++ * writelock on the set. Therefore the length of "data" is
++ * always correct.
++ */
++ void (*list_members) (const struct ip_set *set,
++ void *data);
++
++ char typename[IP_SET_MAXNAMELEN];
++ unsigned char features;
++ int protocol_version;
++
++ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
++ struct module *me;
++};
++
++extern int ip_set_register_set_type(struct ip_set_type *set_type);
++extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
++
++/* A generic ipset */
++struct ip_set {
++ char name[IP_SET_MAXNAMELEN]; /* the name of the set */
++ rwlock_t lock; /* lock for concurrency control */
++ ip_set_id_t id; /* set id for swapping */
++ ip_set_id_t binding; /* default binding for the set */
++ atomic_t ref; /* in kernel and in hash references */
++ struct ip_set_type *type; /* the set types */
++ void *data; /* set type specific data */
++};
++
++/* Structure to bind set elements to sets */
++struct ip_set_hash {
++ struct list_head list; /* list of clashing entries in hash */
++ ip_set_ip_t ip; /* ip from set */
++ ip_set_id_t id; /* set id */
++ ip_set_id_t binding; /* set we bind the element to */
++};
++
++/* register and unregister set references */
++extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
++extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
++extern void ip_set_put(ip_set_id_t id);
++
++/* API for iptables set match, and SET target */
++extern void ip_set_addip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern void ip_set_delip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++extern int ip_set_testip_kernel(ip_set_id_t id,
++ const struct sk_buff *skb,
++ const u_int32_t *flags);
++
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_H*/
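
Everything userspace needs in order to drive these operations travels over the SO_IP_SET socket option defined at the top of this header. Below is a hedged sketch of the simplest exchange, the IP_SET_OP_VERSION query; the constants and the request layout are taken from the header above, but the choice of a raw AF_INET socket and the SOL_IP level are assumptions matching how the ipset 2.x userspace tool talks to this kernel side, and the program needs root/CAP_NET_ADMIN.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#define SO_IP_SET          83          /* from ip_set.h above */
#define IP_SET_OP_VERSION  0x00000100

struct ip_set_req_version {            /* layout from ip_set.h above */
    unsigned op;
    unsigned version;
};

int main(void)
{
    struct ip_set_req_version req;
    socklen_t size = sizeof(req);
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);   /* needs root */

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    memset(&req, 0, sizeof(req));
    req.op = IP_SET_OP_VERSION;
    if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size) < 0) {
        perror("getsockopt(SO_IP_SET)");
        close(fd);
        return 1;
    }
    printf("kernel ip_set protocol version: %u\n", req.version);
    close(fd);
    return 0;
}

The add/del/test, bind and list/save/restore operations in the 0x01xx and 0x02xx ranges follow the same request/response pattern, using the structures defined above.
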
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_iphash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,30 @@
++#ifndef __IP_SET_IPHASH_H
++#define __IP_SET_IPHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iphash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iphash {
++ ip_set_ip_t *members; /* the iphash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t netmask; /* netmask */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_iphash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t netmask;
++};
++
++struct ip_set_req_iphash {
++ ip_set_ip_t ip;
++};
++
++#endif /* __IP_SET_IPHASH_H */
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_ipmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,56 @@
++#ifndef __IP_SET_IPMAP_H
++#define __IP_SET_IPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipmap"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_ipmap {
++ void *members; /* the ipmap proper */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ ip_set_ip_t netmask; /* subnet netmask */
++ ip_set_ip_t sizeid; /* size of set in IPs */
++ ip_set_ip_t hosts; /* number of hosts in a subnet */
++};
++
++struct ip_set_req_ipmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++ ip_set_ip_t netmask;
++};
++
++struct ip_set_req_ipmap {
++ ip_set_ip_t ip;
++};
++
++unsigned int
++mask_to_bits(ip_set_ip_t mask)
++{
++ int bits = 32;
++ ip_set_ip_t maskaddr;
++
++ if (mask == 0xFFFFFFFF)
++ return bits;
++
++ maskaddr = 0xFFFFFFFE;
++ while (--bits >= 0 && maskaddr != mask)
++ maskaddr <<= 1;
++
++ return bits;
++}
++
++ip_set_ip_t
++range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
++{
++ ip_set_ip_t mask = 0xFFFFFFFE;
++
++ *bits = 32;
++ while (--(*bits) > 0 && mask && (to & mask) != from)
++ mask <<= 1;
++
++ return mask;
++}
++
++#endif /* __IP_SET_IPMAP_H */
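mask_to_bits() and range_to_mask() above convert between an inclusive from-to range and a netmask/prefix length, both in host byte order. A standalone userspace restatement of the range_to_mask() arithmetic follows, purely for illustration; plain uint32_t stands in for ip_set_ip_t, the demo_ name is not from the patch, and the loop bound is written as > 0 to keep the unsigned counter well-defined.

/* Standalone restatement of the range_to_mask() arithmetic above,
 * using plain uint32_t in place of ip_set_ip_t. Purely illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_range_to_mask(uint32_t from, uint32_t to,
				   unsigned int *bits)
{
	uint32_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	unsigned int bits;
	/* 192.168.1.0 - 192.168.1.255 in host byte order */
	uint32_t mask = demo_range_to_mask(0xC0A80100, 0xC0A801FF, &bits);

	/* Prints: mask 0xffffff00, /24 */
	printf("mask 0x%08x, /%u\n", mask, bits);
	return 0;
}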
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_ipporthash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,34 @@
++#ifndef __IP_SET_IPPORTHASH_H
++#define __IP_SET_IPPORTHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "ipporthash"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT (MAX_RANGE + 1)
++
++struct ip_set_ipporthash {
++ ip_set_ip_t *members; /* the ipporthash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_ipporthash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_ipporthash {
++ ip_set_ip_t ip;
++ ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_IPPORTHASH_H */
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_iptree.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREE_H
++#define __IP_SET_IPTREE_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptree"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_iptreed {
++ unsigned long expires[256]; /* x.x.x.ADDR */
++};
++
++struct ip_set_iptreec {
++ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
++};
++
++struct ip_set_iptreeb {
++ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
++};
++
++struct ip_set_iptree {
++ unsigned int timeout;
++ unsigned int gc_interval;
++#ifdef __KERNEL__
++ uint32_t elements; /* number of elements */
++ struct timer_list gc;
++ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
++#endif
++};
++
++struct ip_set_req_iptree_create {
++ unsigned int timeout;
++};
++
++struct ip_set_req_iptree {
++ ip_set_ip_t ip;
++ unsigned int timeout;
++};
++
++#endif /* __IP_SET_IPTREE_H */
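The iptree type spreads an address over four 256-way levels, one per octet, with per-address expiry timestamps at the leaves. The sketch below shows how an address in host byte order would be walked down to its expires[] slot; the helper name is illustrative, and allocation and garbage-collection timer handling are left out.

/* Illustrative walk of the iptree levels for one address in host
 * byte order: tree[a] -> tree[b] -> tree[c] -> expires[d] for the
 * address a.b.c.d. Kernel context assumed (the tree pointers are
 * only present under __KERNEL__). */
static inline unsigned long *
demo_iptree_slot(struct ip_set_iptree *map, ip_set_ip_t ip)
{
	unsigned char a = (ip >> 24) & 0xFF;
	unsigned char b = (ip >> 16) & 0xFF;
	unsigned char c = (ip >> 8) & 0xFF;
	unsigned char d = ip & 0xFF;
	struct ip_set_iptreeb *btree = map->tree[a];
	struct ip_set_iptreec *ctree;
	struct ip_set_iptreed *dtree;

	if (!btree || !(ctree = btree->tree[b]) || !(dtree = ctree->tree[c]))
		return NULL;		/* branch not allocated yet */

	return &dtree->expires[d];	/* expiry timestamp for a.b.c.d */
}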
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_iptreemap.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_iptreemap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,40 @@
++#ifndef __IP_SET_IPTREEMAP_H
++#define __IP_SET_IPTREEMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "iptreemap"
++
++#ifdef __KERNEL__
++struct ip_set_iptreemap_d {
++ unsigned char bitmap[32]; /* x.x.x.y */
++};
++
++struct ip_set_iptreemap_c {
++ struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
++};
++
++struct ip_set_iptreemap_b {
++ struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
++ unsigned char dirty[32];
++};
++#endif
++
++struct ip_set_iptreemap {
++ unsigned int gc_interval;
++#ifdef __KERNEL__
++ struct timer_list gc;
++ struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
++#endif
++};
++
++struct ip_set_req_iptreemap_create {
++ unsigned int gc_interval;
++};
++
++struct ip_set_req_iptreemap {
++ ip_set_ip_t start;
++ ip_set_ip_t end;
++};
++
++#endif /* __IP_SET_IPTREEMAP_H */
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_jhash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,148 @@
++#ifndef _LINUX_IPSET_JHASH_H
++#define _LINUX_IPSET_JHASH_H
++
++/* This is a copy of linux/jhash.h but the types u32/u8 are changed
++ * to __u32/__u8 so that the header file can be included into
++ * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
++ */
++
++/* jhash.h: Jenkins hash support.
++ *
++ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
++ *
++ * http://burtleburtle.net/bob/hash/
++ *
++ * These are the credits from Bob's sources:
++ *
++ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
++ * hash(), hash2(), hash3, and mix() are externally useful functions.
++ * Routines to test the hash are included if SELF_TEST is defined.
++ * You can use this free for any purpose. It has no warranty.
++ *
++ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
++ *
++ * I've modified Bob's hash to be useful in the Linux kernel, and
++ * any bugs present are surely my fault. -DaveM
++ */
++
++/* NOTE: Arguments are modified. */
++#define __jhash_mix(a, b, c) \
++{ \
++ a -= b; a -= c; a ^= (c>>13); \
++ b -= c; b -= a; b ^= (a<<8); \
++ c -= a; c -= b; c ^= (b>>13); \
++ a -= b; a -= c; a ^= (c>>12); \
++ b -= c; b -= a; b ^= (a<<16); \
++ c -= a; c -= b; c ^= (b>>5); \
++ a -= b; a -= c; a ^= (c>>3); \
++ b -= c; b -= a; b ^= (a<<10); \
++ c -= a; c -= b; c ^= (b>>15); \
++}
++
++/* The golden ratio: an arbitrary value */
++#define JHASH_GOLDEN_RATIO 0x9e3779b9
++
++/* The most generic version, hashes an arbitrary sequence
++ * of bytes. No alignment or length assumptions are made about
++ * the input key.
++ */
++static inline __u32 jhash(void *key, __u32 length, __u32 initval)
++{
++ __u32 a, b, c, len;
++ __u8 *k = key;
++
++ len = length;
++ a = b = JHASH_GOLDEN_RATIO;
++ c = initval;
++
++ while (len >= 12) {
++ a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
++ b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
++ c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
++
++ __jhash_mix(a,b,c);
++
++ k += 12;
++ len -= 12;
++ }
++
++ c += length;
++ switch (len) {
++ case 11: c += ((__u32)k[10]<<24);
++ case 10: c += ((__u32)k[9]<<16);
++ case 9 : c += ((__u32)k[8]<<8);
++ case 8 : b += ((__u32)k[7]<<24);
++ case 7 : b += ((__u32)k[6]<<16);
++ case 6 : b += ((__u32)k[5]<<8);
++ case 5 : b += k[4];
++ case 4 : a += ((__u32)k[3]<<24);
++ case 3 : a += ((__u32)k[2]<<16);
++ case 2 : a += ((__u32)k[1]<<8);
++ case 1 : a += k[0];
++ };
++
++ __jhash_mix(a,b,c);
++
++ return c;
++}
++
++/* A special optimized version that handles one or more __u32s.
++ * The length parameter here is the number of __u32s in the key.
++ */
++static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
++{
++ __u32 a, b, c, len;
++
++ a = b = JHASH_GOLDEN_RATIO;
++ c = initval;
++ len = length;
++
++ while (len >= 3) {
++ a += k[0];
++ b += k[1];
++ c += k[2];
++ __jhash_mix(a, b, c);
++ k += 3; len -= 3;
++ }
++
++ c += length * 4;
++
++ switch (len) {
++ case 2 : b += k[1];
++ case 1 : a += k[0];
++ };
++
++ __jhash_mix(a,b,c);
++
++ return c;
++}
++
++
++/* A special ultra-optimized version that knows it is hashing exactly
++ * 3, 2 or 1 word(s).
++ *
++ * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
++ * done at the end is not done here.
++ */
++static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
++{
++ a += JHASH_GOLDEN_RATIO;
++ b += JHASH_GOLDEN_RATIO;
++ c += initval;
++
++ __jhash_mix(a, b, c);
++
++ return c;
++}
++
++static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
++{
++ return jhash_3words(a, b, 0, initval);
++}
++
++static inline __u32 jhash_1word(__u32 a, __u32 initval)
++{
++ return jhash_3words(a, 0, 0, initval);
++}
++
++#endif /* _LINUX_IPSET_JHASH_H */
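As the comment at the top notes, this header is deliberately written so it can also be included from userspace. The bindings hash in ip_set.c buckets (set id, ip) pairs with jhash_2words() modulo the hash table size; a small userspace sketch of the same computation follows, where the include path, seed and table size are assumptions made only for the example.

/* Userspace sketch of the (id, ip) -> bucket computation that the
 * bindings hash in ip_set.c performs with jhash_2words(). */
#include <stdio.h>
#include <linux/types.h>		/* __u32 for userspace builds */
#include "ip_set_jhash.h"

int main(void)
{
	__u32 initval = 0x12345678;	/* stand-in for the random seed */
	__u32 hashsize = 1024;		/* stand-in for the hash table size */
	__u32 id = 3;			/* set id */
	__u32 ip = 0xC0A80101;		/* 192.168.1.1, host byte order */
	__u32 key = jhash_2words(id, ip, initval) % hashsize;

	printf("bucket %u\n", key);
	return 0;
}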
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_macipmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,38 @@
++#ifndef __IP_SET_MACIPMAP_H
++#define __IP_SET_MACIPMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "macipmap"
++#define MAX_RANGE 0x0000FFFF
++
++/* general flags */
++#define IPSET_MACIP_MATCHUNSET 1
++
++/* per ip flags */
++#define IPSET_MACIP_ISSET 1
++
++struct ip_set_macipmap {
++ void *members; /* the macipmap proper */
++ ip_set_ip_t first_ip; /* host byte order, included in range */
++ ip_set_ip_t last_ip; /* host byte order, included in range */
++ u_int32_t flags;
++};
++
++struct ip_set_req_macipmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++ u_int32_t flags;
++};
++
++struct ip_set_req_macipmap {
++ ip_set_ip_t ip;
++ unsigned char ethernet[ETH_ALEN];
++};
++
++struct ip_set_macip {
++ unsigned short flags;
++ unsigned char ethernet[ETH_ALEN];
++};
++
++#endif /* __IP_SET_MACIPMAP_H */
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_malloc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,116 @@
++#ifndef _IP_SET_MALLOC_H
++#define _IP_SET_MALLOC_H
++
++#ifdef __KERNEL__
++
++/* Memory allocation and deallocation */
++static size_t max_malloc_size = 0;
++
++static inline void init_max_malloc_size(void)
++{
++#define CACHE(x) max_malloc_size = x;
++#include <linux/kmalloc_sizes.h>
++#undef CACHE
++}
++
++static inline void * ip_set_malloc(size_t bytes)
++{
++ if (bytes > max_malloc_size)
++ return vmalloc(bytes);
++ else
++ return kmalloc(bytes, GFP_KERNEL);
++}
++
++static inline void ip_set_free(void * data, size_t bytes)
++{
++ if (bytes > max_malloc_size)
++ vfree(data);
++ else
++ kfree(data);
++}
++
++struct harray {
++ size_t max_elements;
++ void *arrays[0];
++};
++
++static inline void *
++harray_malloc(size_t hashsize, size_t typesize, int flags)
++{
++ struct harray *harray;
++ size_t max_elements, size, i, j;
++
++ if (!max_malloc_size)
++ init_max_malloc_size();
++
++ if (typesize > max_malloc_size)
++ return NULL;
++
++ max_elements = max_malloc_size/typesize;
++ size = hashsize/max_elements;
++ if (hashsize % max_elements)
++ size++;
++
++ /* Last pointer signals end of arrays */
++ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
++ flags);
++
++ if (!harray)
++ return NULL;
++
++ for (i = 0; i < size - 1; i++) {
++ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
++ if (!harray->arrays[i])
++ goto undo;
++ memset(harray->arrays[i], 0, max_elements * typesize);
++ }
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ flags);
++ if (!harray->arrays[i])
++ goto undo;
++ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
++
++ harray->max_elements = max_elements;
++ harray->arrays[size] = NULL;
++
++ return (void *)harray;
++
++ undo:
++ for (j = 0; j < i; j++) {
++ kfree(harray->arrays[j]);
++ }
++ kfree(harray);
++ return NULL;
++}
++
++static inline void harray_free(void *h)
++{
++ struct harray *harray = (struct harray *) h;
++ size_t i;
++
++ for (i = 0; harray->arrays[i] != NULL; i++)
++ kfree(harray->arrays[i]);
++ kfree(harray);
++}
++
++static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
++{
++ struct harray *harray = (struct harray *) h;
++ size_t i;
++
++ for (i = 0; harray->arrays[i+1] != NULL; i++)
++ memset(harray->arrays[i], 0, harray->max_elements * typesize);
++ memset(harray->arrays[i], 0,
++ (hashsize - i * harray->max_elements) * typesize);
++}
++
++#define HARRAY_ELEM(h, type, which) \
++({ \
++ struct harray *__h = (struct harray *)(h); \
++ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
++ + (which)%(__h)->max_elements); \
++})
++
++#endif /* __KERNEL__ */
++
++#endif /*_IP_SET_MALLOC_H*/
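harray_malloc() splits one logical array into kmalloc-able chunks of max_elements entries each, and HARRAY_ELEM() recomputes which chunk and which offset a logical index falls into. Below is a userspace model of that two-level indexing with plain calloc(); the demo_ names and the fixed chunk count are illustrative only.

/* Userspace model of the harray indexing scheme: element i lives in
 * chunk i / max_elements at offset i % max_elements. kmalloc/vmalloc
 * and the kernel allocation flags are replaced by calloc(). */
#include <stdlib.h>
#include <stdio.h>

struct demo_harray {
	size_t max_elements;		/* entries per chunk */
	unsigned int *chunks[4];	/* fixed chunk count for the demo */
};

static unsigned int *demo_elem(struct demo_harray *h, size_t which)
{
	return h->chunks[which / h->max_elements] + which % h->max_elements;
}

int main(void)
{
	struct demo_harray h = { .max_elements = 256 };
	size_t i;

	for (i = 0; i < 4; i++)
		h.chunks[i] = calloc(h.max_elements, sizeof(unsigned int));

	*demo_elem(&h, 700) = 42;	/* lands in chunk 2, offset 188 */
	printf("%u\n", *demo_elem(&h, 700));

	for (i = 0; i < 4; i++)
		free(h.chunks[i]);
	return 0;
}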
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_nethash.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef __IP_SET_NETHASH_H
++#define __IP_SET_NETHASH_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "nethash"
++#define MAX_RANGE 0x0000FFFF
++
++struct ip_set_nethash {
++ ip_set_ip_t *members; /* the nethash proper */
++ uint32_t elements; /* number of elements */
++ uint32_t hashsize; /* hash size */
++ uint16_t probes; /* max number of probes */
++ uint16_t resize; /* resize factor in percent */
++ unsigned char cidr[30]; /* CIDR sizes */
++ void *initval[0]; /* initvals for jhash_1word */
++};
++
++struct ip_set_req_nethash_create {
++ uint32_t hashsize;
++ uint16_t probes;
++ uint16_t resize;
++};
++
++struct ip_set_req_nethash {
++ ip_set_ip_t ip;
++ unsigned char cidr;
++};
++
++static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
++
++static inline ip_set_ip_t
++pack(ip_set_ip_t ip, unsigned char cidr)
++{
++ ip_set_ip_t addr, *paddr = &addr;
++ unsigned char n, t, *a;
++
++ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
++#ifdef __KERNEL__
++ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
++#endif
++ n = cidr / 8;
++ t = cidr % 8;
++ a = &((unsigned char *)paddr)[n];
++ *a = *a /(1 << (8 - t)) + shifts[t];
++#ifdef __KERNEL__
++ DP("n: %u, t: %u, a: %u", n, t, *a);
++ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
++ HIPQUAD(ip), cidr, NIPQUAD(addr));
++#endif
++
++ return ntohl(addr);
++}
++
++#endif /* __IP_SET_NETHASH_H */
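pack() folds the prefix length into the unused low bits of the masked network address, so the same network stored with different CIDR sizes hashes to different values. A userspace restatement with one worked example follows; the demo_ prefix is illustrative and shifts[] is copied from the header above.

/* Userspace restatement of pack() above, showing how the prefix
 * length is folded into the low bits of the masked network address. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static const unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};

static uint32_t demo_pack(uint32_t ip, unsigned char cidr)
{
	uint32_t addr = htonl(ip & (0xFFFFFFFF << (32 - cidr)));
	unsigned char *a = (unsigned char *)&addr + cidr / 8;
	unsigned char t = cidr % 8;

	*a = *a / (1 << (8 - t)) + shifts[t];
	return ntohl(addr);
}

int main(void)
{
	/* 192.168.1.0/24 packs to 0xc0a801ff */
	printf("0x%08x\n", demo_pack(0xC0A80100, 24));
	return 0;
}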
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ip_set_portmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,25 @@
++#ifndef __IP_SET_PORTMAP_H
++#define __IP_SET_PORTMAP_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++#define SETTYPE_NAME "portmap"
++#define MAX_RANGE 0x0000FFFF
++#define INVALID_PORT (MAX_RANGE + 1)
++
++struct ip_set_portmap {
++ void *members; /* the portmap proper */
++ ip_set_ip_t first_port; /* host byte order, included in range */
++ ip_set_ip_t last_port; /* host byte order, included in range */
++};
++
++struct ip_set_req_portmap_create {
++ ip_set_ip_t from;
++ ip_set_ip_t to;
++};
++
++struct ip_set_req_portmap {
++ ip_set_ip_t port;
++};
++
++#endif /* __IP_SET_PORTMAP_H */
+diff -Nru ./linux-2.6.21.5/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h
+--- ./linux-2.6.21.5/include/linux/netfilter_ipv4/ipt_set.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,21 @@
++#ifndef _IPT_SET_H
++#define _IPT_SET_H
++
++#include <linux/netfilter_ipv4/ip_set.h>
++
++struct ipt_set_info {
++ ip_set_id_t index;
++ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
++};
++
++/* match info */
++struct ipt_set_info_match {
++ struct ipt_set_info match_set;
++};
++
++struct ipt_set_info_target {
++ struct ipt_set_info add_set;
++ struct ipt_set_info del_set;
++};
++
++#endif /*_IPT_SET_H*/
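struct ipt_set_info carries the resolved set index plus one flag word per binding level, zero-terminated, which is exactly what the *_kernel() helpers in ip_set.c iterate over. A sketch of how a hypothetical userspace iptables extension might fill the match info; the index lookup through the SO_IP_SET getsockopt interface is omitted, the helper name is illustrative, and IPSET_SRC is assumed from the full ip_set.h.

/* Hypothetical userspace helper filling the match info defined above.
 * The set index would normally be obtained from the kernel via the
 * SO_IP_SET getsockopt interface (IP_SET_OP_GET_BYNAME); here it is
 * taken as a parameter. */
#include <string.h>
#include <linux/netfilter_ipv4/ipt_set.h>

static void demo_fill_match(struct ipt_set_info_match *info,
			    ip_set_id_t index)
{
	memset(info, 0, sizeof(*info));
	info->match_set.index = index;
	/* Match on the source address; the zero that follows terminates
	 * the per-binding flag list walked by ip_set_testip_kernel(). */
	info->match_set.flags[0] = IPSET_SRC;
	info->match_set.flags[1] = 0;
}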
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,2003 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module for IP set management */
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++#include <linux/config.h>
++#endif
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/kmod.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/random.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <asm/semaphore.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++
++#define ASSERT_READ_LOCK(x)
++#define ASSERT_WRITE_LOCK(x)
++#include <linux/netfilter_ipv4/ip_set.h>
++
++static struct list_head set_type_list; /* all registered sets */
++static struct ip_set **ip_set_list; /* all individual sets */
++static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
++static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
++static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
++static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
++static struct list_head *ip_set_hash; /* hash of bindings */
++static unsigned int ip_set_hash_random; /* random seed */
++
++/*
++ * Sets are identified either by the index in ip_set_list or by id.
++ * The id never changes and is used to find a key in the hash.
++ * The index may change by swapping and is used at all other places
++ * (set/SET netfilter modules, binding value, etc.)
++ *
++ * Userspace requests are serialized by ip_set_app_mutex and sets can
++ * be deleted only from userspace. Therefore ip_set_list locking
++ * must obey the following rules:
++ *
++ * - kernel requests: read and write locking mandatory
++ * - user requests: read locking optional, write locking mandatory
++ */
++
++static inline void
++__ip_set_get(ip_set_id_t index)
++{
++ atomic_inc(&ip_set_list[index]->ref);
++}
++
++static inline void
++__ip_set_put(ip_set_id_t index)
++{
++ atomic_dec(&ip_set_list[index]->ref);
++}
++
++/*
++ * Binding routines
++ */
++
++static inline struct ip_set_hash *
++__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
++{
++ struct ip_set_hash *set_hash;
++
++ list_for_each_entry(set_hash, &ip_set_hash[key], list)
++ if (set_hash->id == id && set_hash->ip == ip)
++ return set_hash;
++
++ return NULL;
++}
++
++static ip_set_id_t
++ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++
++ ASSERT_READ_LOCK(&ip_set_lock);
++ IP_SET_ASSERT(ip_set_list[id]);
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
++ set_hash = __ip_set_find(key, id, ip);
++
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip),
++ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
++}
++
++static inline void
++__set_hash_del(struct ip_set_hash *set_hash)
++{
++ ASSERT_WRITE_LOCK(&ip_set_lock);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++
++ __ip_set_put(set_hash->binding);
++ list_del(&set_hash->list);
++ kfree(set_hash);
++}
++
++static int
++ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++
++ IP_SET_ASSERT(ip_set_list[id]);
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ write_lock_bh(&ip_set_lock);
++ set_hash = __ip_set_find(key, id, ip);
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip),
++ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
++
++ if (set_hash != NULL)
++ __set_hash_del(set_hash);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++}
++
++static int
++ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
++{
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ % ip_set_bindings_hash_size;
++ struct ip_set_hash *set_hash;
++ int ret = 0;
++
++ IP_SET_ASSERT(ip_set_list[id]);
++ IP_SET_ASSERT(ip_set_list[binding]);
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ HIPQUAD(ip), ip_set_list[binding]->name);
++ write_lock_bh(&ip_set_lock);
++ set_hash = __ip_set_find(key, id, ip);
++ if (!set_hash) {
++ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
++ if (!set_hash) {
++ ret = -ENOMEM;
++ goto unlock;
++ }
++ INIT_LIST_HEAD(&set_hash->list);
++ set_hash->id = id;
++ set_hash->ip = ip;
++ list_add(&set_hash->list, &ip_set_hash[key]);
++ } else {
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ DP("overwrite binding: %s",
++ ip_set_list[set_hash->binding]->name);
++ __ip_set_put(set_hash->binding);
++ }
++ set_hash->binding = binding;
++ __ip_set_get(set_hash->binding);
++ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
++ key, id, ip_set_list[id]->name,
++ HIPQUAD(ip), binding, ip_set_list[binding]->name);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return ret;
++}
++
++#define FOREACH_HASH_DO(fn, args...) \
++({ \
++ ip_set_id_t __key; \
++ struct ip_set_hash *__set_hash; \
++ \
++ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
++ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
++ fn(__set_hash , ## args); \
++ } \
++})
++
++#define FOREACH_HASH_RW_DO(fn, args...) \
++({ \
++ ip_set_id_t __key; \
++ struct ip_set_hash *__set_hash, *__n; \
++ \
++ ASSERT_WRITE_LOCK(&ip_set_lock); \
++ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
++ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
++ fn(__set_hash , ## args); \
++ } \
++})
++
++/* Add, del and test set entries from kernel */
++
++#define follow_bindings(index, set, ip) \
++((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
++ || (index = (set)->binding) != IP_SET_INVALID_ID)
++
++int
++ip_set_testip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ read_lock_bh(&set->lock);
++ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
++ read_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while (res > 0
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++
++ return res;
++}
++
++void
++ip_set_addip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ retry:
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ write_lock_bh(&set->lock);
++ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
++ write_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while ((res == 0 || res == -EEXIST)
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++
++ if (res == -EAGAIN
++ && set->type->retry
++ && (res = set->type->retry(set)) == 0)
++ goto retry;
++}
++
++void
++ip_set_delip_kernel(ip_set_id_t index,
++ const struct sk_buff *skb,
++ const u_int32_t *flags)
++{
++ struct ip_set *set;
++ ip_set_ip_t ip;
++ int res;
++ unsigned char i = 0;
++
++ IP_SET_ASSERT(flags[i]);
++ read_lock_bh(&ip_set_lock);
++ do {
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ DP("set %s, index %u", set->name, index);
++ write_lock_bh(&set->lock);
++ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
++ write_unlock_bh(&set->lock);
++ i += !!(set->type->features & IPSET_DATA_DOUBLE);
++ } while ((res == 0 || res == -EEXIST)
++ && flags[i]
++ && follow_bindings(index, set, ip));
++ read_unlock_bh(&ip_set_lock);
++}
++
++/* Register and deregister settype */
++
++static inline struct ip_set_type *
++find_set_type(const char *name)
++{
++ struct ip_set_type *set_type;
++
++ list_for_each_entry(set_type, &set_type_list, list)
++ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
++ return set_type;
++ return NULL;
++}
++
++int
++ip_set_register_set_type(struct ip_set_type *set_type)
++{
++ int ret = 0;
++
++ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
++ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
++ set_type->typename,
++ set_type->protocol_version,
++ IP_SET_PROTOCOL_VERSION);
++ return -EINVAL;
++ }
++
++ write_lock_bh(&ip_set_lock);
++ if (find_set_type(set_type->typename)) {
++ /* Duplicate! */
++ ip_set_printk("'%s' already registered!",
++ set_type->typename);
++ ret = -EINVAL;
++ goto unlock;
++ }
++ if (!try_module_get(THIS_MODULE)) {
++ ret = -EFAULT;
++ goto unlock;
++ }
++ list_add(&set_type->list, &set_type_list);
++ DP("'%s' registered.", set_type->typename);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return ret;
++}
++
++void
++ip_set_unregister_set_type(struct ip_set_type *set_type)
++{
++ write_lock_bh(&ip_set_lock);
++ if (!find_set_type(set_type->typename)) {
++ ip_set_printk("'%s' not registered?",
++ set_type->typename);
++ goto unlock;
++ }
++ list_del(&set_type->list);
++ module_put(THIS_MODULE);
++ DP("'%s' unregistered.", set_type->typename);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++
++}
++
++/*
++ * Userspace routines
++ */
++
++/*
++ * Find set by name, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byname(const char *name)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ down(&ip_set_app_mutex);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strcmp(ip_set_list[i]->name, name) == 0) {
++ __ip_set_get(i);
++ index = i;
++ break;
++ }
++ }
++ up(&ip_set_app_mutex);
++ return index;
++}
++
++/*
++ * Find set by index, reference it once. The reference makes sure the
++ * thing pointed to does not go away under our feet. Drop the reference
++ * later, using ip_set_put().
++ */
++ip_set_id_t
++ip_set_get_byindex(ip_set_id_t index)
++{
++ down(&ip_set_app_mutex);
++
++ if (index >= ip_set_max) {
++ up(&ip_set_app_mutex);
++ return IP_SET_INVALID_ID;
++ }
++
++ if (ip_set_list[index])
++ __ip_set_get(index);
++ else
++ index = IP_SET_INVALID_ID;
++
++ up(&ip_set_app_mutex);
++ return index;
++}
++
++/*
++ * If the given index points to a valid set, decrement the
++ * reference count by 1. The caller shall not assume the index
++ * to be valid after calling this function.
++ */
++void ip_set_put(ip_set_id_t index)
++{
++ down(&ip_set_app_mutex);
++ if (ip_set_list[index])
++ __ip_set_put(index);
++ up(&ip_set_app_mutex);
++}
++
++/* Find a set by name or index */
++static ip_set_id_t
++ip_set_find_byname(const char *name)
++{
++ ip_set_id_t i, index = IP_SET_INVALID_ID;
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strcmp(ip_set_list[i]->name, name) == 0) {
++ index = i;
++ break;
++ }
++ }
++ return index;
++}
++
++static ip_set_id_t
++ip_set_find_byindex(ip_set_id_t index)
++{
++ if (index >= ip_set_max || ip_set_list[index] == NULL)
++ index = IP_SET_INVALID_ID;
++
++ return index;
++}
++
++/*
++ * Add, del, test, bind and unbind
++ */
++
++static inline int
++__ip_set_testip(struct ip_set *set,
++ const void *data,
++ size_t size,
++ ip_set_ip_t *ip)
++{
++ int res;
++
++ read_lock_bh(&set->lock);
++ res = set->type->testip(set, data, size, ip);
++ read_unlock_bh(&set->lock);
++
++ return res;
++}
++
++static int
++__ip_set_addip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ do {
++ write_lock_bh(&set->lock);
++ res = set->type->addip(set, data, size, &ip);
++ write_unlock_bh(&set->lock);
++ } while (res == -EAGAIN
++ && set->type->retry
++ && (res = set->type->retry(set)) == 0);
++
++ return res;
++}
++
++static int
++ip_set_addip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++
++ return __ip_set_addip(index,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt));
++}
++
++static int
++ip_set_delip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ write_lock_bh(&set->lock);
++ res = set->type->delip(set,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt),
++ &ip);
++ write_unlock_bh(&set->lock);
++
++ return res;
++}
++
++static int
++ip_set_testip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_adt),
++ size - sizeof(struct ip_set_req_adt),
++ &ip);
++
++ return (res > 0 ? -EEXIST : res);
++}
++
++static int
++ip_set_bindip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_req_bind *req_bind;
++ ip_set_id_t binding;
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of a set */
++ char *binding_name;
++
++ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++ return -EINVAL;
++
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ binding = ip_set_find_byname(binding_name);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ write_lock_bh(&ip_set_lock);
++ /* Sets as binding values are referenced */
++ if (set->binding != IP_SET_INVALID_ID)
++ __ip_set_put(set->binding);
++ set->binding = binding;
++ __ip_set_get(set->binding);
++ write_unlock_bh(&ip_set_lock);
++
++ return 0;
++ }
++ binding = ip_set_find_byname(req_bind->binding);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++ DP("set %s, ip: %u.%u.%u.%u, binding %s",
++ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++
++ if (res >= 0)
++ res = ip_set_hash_add(set->id, ip, binding);
++
++ return res;
++}
++
++#define FOREACH_SET_DO(fn, args...) \
++({ \
++ ip_set_id_t __i; \
++ struct ip_set *__set; \
++ \
++ for (__i = 0; __i < ip_set_max; __i++) { \
++ __set = ip_set_list[__i]; \
++ if (__set != NULL) \
++ fn(__set , ##args); \
++ } \
++})
++
++static inline void
++__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
++{
++ if (set_hash->id == id)
++ __set_hash_del(set_hash);
++}
++
++static inline void
++__unbind_default(struct ip_set *set)
++{
++ if (set->binding != IP_SET_INVALID_ID) {
++ /* Sets as binding values are referenced */
++ __ip_set_put(set->binding);
++ set->binding = IP_SET_INVALID_ID;
++ }
++}
++
++static int
++ip_set_unbindip(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set;
++ struct ip_set_req_bind *req_bind;
++ ip_set_ip_t ip;
++ int res;
++
++ DP("");
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ DP("%u %s", index, req_bind->binding);
++ if (index == IP_SET_INVALID_ID) {
++ /* unbind :all: */
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of sets */
++ write_lock_bh(&ip_set_lock);
++ FOREACH_SET_DO(__unbind_default);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all bindings of all sets */
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ }
++ DP("unreachable reached!");
++ return -EINVAL;
++ }
++
++ set = ip_set_list[index];
++ IP_SET_ASSERT(set);
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of set */
++ ip_set_id_t binding = ip_set_find_byindex(set->binding);
++
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ write_lock_bh(&ip_set_lock);
++ /* Sets in hash values are referenced */
++ __ip_set_put(set->binding);
++ set->binding = IP_SET_INVALID_ID;
++ write_unlock_bh(&ip_set_lock);
++
++ return 0;
++ } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all bindings */
++
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++ }
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++
++ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
++ if (res >= 0)
++ res = ip_set_hash_del(set->id, ip);
++
++ return res;
++}
++
++static int
++ip_set_testbind(ip_set_id_t index,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_req_bind *req_bind;
++ ip_set_id_t binding;
++ ip_set_ip_t ip;
++ int res;
++
++ IP_SET_ASSERT(set);
++ if (size < sizeof(struct ip_set_req_bind))
++ return -EINVAL;
++
++ req_bind = (struct ip_set_req_bind *) data;
++ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
++ /* Default binding of set */
++ char *binding_name;
++
++ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
++ return -EINVAL;
++
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ binding = ip_set_find_byname(binding_name);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++ res = (set->binding == binding) ? -EEXIST : 0;
++
++ return res;
++ }
++ binding = ip_set_find_byname(req_bind->binding);
++ if (binding == IP_SET_INVALID_ID)
++ return -ENOENT;
++
++
++ res = __ip_set_testip(set,
++ data + sizeof(struct ip_set_req_bind),
++ size - sizeof(struct ip_set_req_bind),
++ &ip);
++ DP("set %s, ip: %u.%u.%u.%u, binding %s",
++ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
++
++ if (res >= 0)
++ res = (ip_set_find_in_hash(set->id, ip) == binding)
++ ? -EEXIST : 0;
++
++ return res;
++}
++
++static struct ip_set_type *
++find_set_type_rlock(const char *typename)
++{
++ struct ip_set_type *type;
++
++ read_lock_bh(&ip_set_lock);
++ type = find_set_type(typename);
++ if (type == NULL)
++ read_unlock_bh(&ip_set_lock);
++
++ return type;
++}
++
++static int
++find_free_id(const char *name,
++ ip_set_id_t *index,
++ ip_set_id_t *id)
++{
++ ip_set_id_t i;
++
++ *id = IP_SET_INVALID_ID;
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] == NULL) {
++ if (*id == IP_SET_INVALID_ID)
++ *id = *index = i;
++ } else if (strcmp(name, ip_set_list[i]->name) == 0)
++ /* Name clash */
++ return -EEXIST;
++ }
++ if (*id == IP_SET_INVALID_ID)
++ /* No free slot remained */
++ return -ERANGE;
++ /* Check that index is usable as id (swapping) */
++ check:
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && ip_set_list[i]->id == *id) {
++ *id = i;
++ goto check;
++ }
++ }
++ return 0;
++}
++
++/*
++ * Create a set
++ */
++static int
++ip_set_create(const char *name,
++ const char *typename,
++ ip_set_id_t restore,
++ const void *data,
++ size_t size)
++{
++ struct ip_set *set;
++ ip_set_id_t index = 0, id;
++ int res = 0;
++
++ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
++ /*
++ * First, and without any locks, allocate and initialize
++ * a normal base set structure.
++ */
++ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
++ if (!set)
++ return -ENOMEM;
++ set->lock = RW_LOCK_UNLOCKED;
++ strncpy(set->name, name, IP_SET_MAXNAMELEN);
++ set->binding = IP_SET_INVALID_ID;
++ atomic_set(&set->ref, 0);
++
++ /*
++ * Next, take the &ip_set_lock, check that we know the type,
++ * and take a reference on the type, to make sure it
++ * stays available while constructing our new set.
++ *
++ * After referencing the type, we drop the &ip_set_lock,
++ * and let the new set construction run without locks.
++ */
++ set->type = find_set_type_rlock(typename);
++ if (set->type == NULL) {
++ /* Try loading the module */
++ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
++ strcpy(modulename, "ip_set_");
++ strcat(modulename, typename);
++ DP("try to load %s", modulename);
++ request_module(modulename);
++ set->type = find_set_type_rlock(typename);
++ }
++ if (set->type == NULL) {
++ ip_set_printk("no set type '%s', set '%s' not created",
++ typename, name);
++ res = -ENOENT;
++ goto out;
++ }
++ if (!try_module_get(set->type->me)) {
++ read_unlock_bh(&ip_set_lock);
++ res = -EFAULT;
++ goto out;
++ }
++ read_unlock_bh(&ip_set_lock);
++
++ /*
++ * Without holding any locks, create private part.
++ */
++ res = set->type->create(set, data, size);
++ if (res != 0)
++ goto put_out;
++
++ /* BTW, res==0 here. */
++
++ /*
++ * Here, we have a valid, constructed set. &ip_set_lock again,
++ * find free id/index and check that it is not already in
++ * ip_set_list.
++ */
++ write_lock_bh(&ip_set_lock);
++ if ((res = find_free_id(set->name, &index, &id)) != 0) {
++ DP("no free id!");
++ goto cleanup;
++ }
++
++ /* Make sure restore gets the same index */
++ if (restore != IP_SET_INVALID_ID && index != restore) {
++ DP("Can't restore, sets are screwed up");
++ res = -ERANGE;
++ goto cleanup;
++ }
++
++ /*
++ * Finally! Add our shiny new set to the list, and be done.
++ */
++ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
++ set->id = id;
++ ip_set_list[index] = set;
++ write_unlock_bh(&ip_set_lock);
++ return res;
++
++ cleanup:
++ write_unlock_bh(&ip_set_lock);
++ set->type->destroy(set);
++ put_out:
++ module_put(set->type->me);
++ out:
++ kfree(set);
++ return res;
++}
++
++/*
++ * Destroy a given existing set
++ */
++static void
++ip_set_destroy_set(ip_set_id_t index)
++{
++ struct ip_set *set = ip_set_list[index];
++
++ IP_SET_ASSERT(set);
++ DP("set: %s", set->name);
++ write_lock_bh(&ip_set_lock);
++ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
++ if (set->binding != IP_SET_INVALID_ID)
++ __ip_set_put(set->binding);
++ ip_set_list[index] = NULL;
++ write_unlock_bh(&ip_set_lock);
++
++ /* Must call it without holding any lock */
++ set->type->destroy(set);
++ module_put(set->type->me);
++ kfree(set);
++}
++
++/*
++ * Destroy a set - or all sets
++ * Sets must not be referenced/used.
++ */
++static int
++ip_set_destroy(ip_set_id_t index)
++{
++ ip_set_id_t i;
++
++ /* ref modification always protected by the mutex */
++ if (index != IP_SET_INVALID_ID) {
++ if (atomic_read(&ip_set_list[index]->ref))
++ return -EBUSY;
++ ip_set_destroy_set(index);
++ } else {
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && (atomic_read(&ip_set_list[i]->ref)))
++ return -EBUSY;
++ }
++
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL)
++ ip_set_destroy_set(i);
++ }
++ }
++ return 0;
++}
++
++static void
++ip_set_flush_set(struct ip_set *set)
++{
++ DP("set: %s %u", set->name, set->id);
++
++ write_lock_bh(&set->lock);
++ set->type->flush(set);
++ write_unlock_bh(&set->lock);
++}
++
++/*
++ * Flush data in a set - or in all sets
++ */
++static int
++ip_set_flush(ip_set_id_t index)
++{
++ if (index != IP_SET_INVALID_ID) {
++ IP_SET_ASSERT(ip_set_list[index]);
++ ip_set_flush_set(ip_set_list[index]);
++ } else
++ FOREACH_SET_DO(ip_set_flush_set);
++
++ return 0;
++}
++
++/* Rename a set */
++static int
++ip_set_rename(ip_set_id_t index, const char *name)
++{
++ struct ip_set *set = ip_set_list[index];
++ ip_set_id_t i;
++ int res = 0;
++
++ DP("set: %s to %s", set->name, name);
++ write_lock_bh(&ip_set_lock);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL
++ && strncmp(ip_set_list[i]->name,
++ name,
++ IP_SET_MAXNAMELEN - 1) == 0) {
++ res = -EEXIST;
++ goto unlock;
++ }
++ }
++ strncpy(set->name, name, IP_SET_MAXNAMELEN);
++ unlock:
++ write_unlock_bh(&ip_set_lock);
++ return res;
++}
++
++/*
++ * Swap two sets so that name/index points to the other.
++ * References are also swapped.
++ */
++static int
++ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
++{
++ struct ip_set *from = ip_set_list[from_index];
++ struct ip_set *to = ip_set_list[to_index];
++ char from_name[IP_SET_MAXNAMELEN];
++ u_int32_t from_ref;
++
++ DP("set: %s to %s", from->name, to->name);
++ /* Features must not change. Artificial restriction. */
++ if (from->type->features != to->type->features)
++ return -ENOEXEC;
++
++ /* No magic here: ref munging protected by the mutex */
++ write_lock_bh(&ip_set_lock);
++ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
++ from_ref = atomic_read(&from->ref);
++
++ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
++ atomic_set(&from->ref, atomic_read(&to->ref));
++ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
++ atomic_set(&to->ref, from_ref);
++
++ ip_set_list[from_index] = to;
++ ip_set_list[to_index] = from;
++
++ write_unlock_bh(&ip_set_lock);
++ return 0;
++}
++
++/*
++ * List set data
++ */
++
++static inline void
++__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
++ ip_set_id_t id, size_t *size)
++{
++ if (set_hash->id == id)
++ *size += sizeof(struct ip_set_hash_list);
++}
++
++static inline void
++__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
++ ip_set_id_t id, size_t *size)
++{
++ if (set_hash->id == id)
++ *size += sizeof(struct ip_set_hash_save);
++}
++
++static inline void
++__set_hash_bindings(struct ip_set_hash *set_hash,
++ ip_set_id_t id, void *data, int *used)
++{
++ if (set_hash->id == id) {
++ struct ip_set_hash_list *hash_list =
++ (struct ip_set_hash_list *)(data + *used);
++
++ hash_list->ip = set_hash->ip;
++ hash_list->binding = set_hash->binding;
++ *used += sizeof(struct ip_set_hash_list);
++ }
++}
++
++static int ip_set_list_set(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ struct ip_set *set = ip_set_list[index];
++ struct ip_set_list *set_list;
++
++ /* Pointer to our header */
++ set_list = (struct ip_set_list *) (data + *used);
++
++ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
++
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_list) > len)
++ goto not_enough_mem;
++ *used += sizeof(struct ip_set_list);
++
++ read_lock_bh(&set->lock);
++ /* Get and ensure set specific header size */
++ set_list->header_size = set->type->header_size;
++ if (*used + set_list->header_size > len)
++ goto unlock_set;
++
++ /* Fill in the header */
++ set_list->index = index;
++ set_list->binding = set->binding;
++ set_list->ref = atomic_read(&set->ref);
++
++ /* Fill in set specific header data */
++ set->type->list_header(set, data + *used);
++ *used += set_list->header_size;
++
++ /* Get and ensure set specific members size */
++ set_list->members_size = set->type->list_members_size(set);
++ if (*used + set_list->members_size > len)
++ goto unlock_set;
++
++ /* Fill in set specific members data */
++ set->type->list_members(set, data + *used);
++ *used += set_list->members_size;
++ read_unlock_bh(&set->lock);
++
++ /* Bindings */
++
++ /* Get and ensure set specific bindings size */
++ set_list->bindings_size = 0;
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ set->id, &set_list->bindings_size);
++ if (*used + set_list->bindings_size > len)
++ goto not_enough_mem;
++
++ /* Fill in set specific bindings data */
++ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
++
++ return 0;
++
++ unlock_set:
++ read_unlock_bh(&set->lock);
++ not_enough_mem:
++ DP("not enough mem, try again");
++ return -EAGAIN;
++}
++
++/*
++ * Save sets
++ */
++static int ip_set_save_set(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ struct ip_set *set;
++ struct ip_set_save *set_save;
++
++ /* Pointer to our header */
++ set_save = (struct ip_set_save *) (data + *used);
++
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_save) > len)
++ goto not_enough_mem;
++ *used += sizeof(struct ip_set_save);
++
++ set = ip_set_list[index];
++ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ data, data + *used);
++
++ read_lock_bh(&set->lock);
++ /* Get and ensure set specific header size */
++ set_save->header_size = set->type->header_size;
++ if (*used + set_save->header_size > len)
++ goto unlock_set;
++
++ /* Fill in the header */
++ set_save->index = index;
++ set_save->binding = set->binding;
++
++ /* Fill in set specific header data */
++ set->type->list_header(set, data + *used);
++ *used += set_save->header_size;
++
++ DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
++ set_save->header_size, data, data + *used);
++ /* Get and ensure set specific members size */
++ set_save->members_size = set->type->list_members_size(set);
++ if (*used + set_save->members_size > len)
++ goto unlock_set;
++
++ /* Fill in set specific members data */
++ set->type->list_members(set, data + *used);
++ *used += set_save->members_size;
++ read_unlock_bh(&set->lock);
++ DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
++ set_save->members_size, data, data + *used);
++ return 0;
++
++ unlock_set:
++ read_unlock_bh(&set->lock);
++ not_enough_mem:
++ DP("not enough mem, try again");
++ return -EAGAIN;
++}
++
++static inline void
++__set_hash_save_bindings(struct ip_set_hash *set_hash,
++ ip_set_id_t id,
++ void *data,
++ int *used,
++ int len,
++ int *res)
++{
++ if (*res == 0
++ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
++ struct ip_set_hash_save *hash_save =
++ (struct ip_set_hash_save *)(data + *used);
++ /* Ensure bindings size */
++ if (*used + sizeof(struct ip_set_hash_save) > len) {
++ *res = -ENOMEM;
++ return;
++ }
++ hash_save->id = set_hash->id;
++ hash_save->ip = set_hash->ip;
++ hash_save->binding = set_hash->binding;
++ *used += sizeof(struct ip_set_hash_save);
++ }
++}
++
++static int ip_set_save_bindings(ip_set_id_t index,
++ void *data,
++ int *used,
++ int len)
++{
++ int res = 0;
++ struct ip_set_save *set_save;
++
++ DP("used %u, len %u", *used, len);
++ /* Get and ensure header size */
++ if (*used + sizeof(struct ip_set_save) > len)
++ return -ENOMEM;
++
++ /* Marker */
++ set_save = (struct ip_set_save *) (data + *used);
++ set_save->index = IP_SET_INVALID_ID;
++ set_save->header_size = 0;
++ set_save->members_size = 0;
++ *used += sizeof(struct ip_set_save);
++
++ DP("marker added used %u, len %u", *used, len);
++ /* Fill in bindings data */
++ if (index != IP_SET_INVALID_ID)
++ /* Sets are identified by id in hash */
++ index = ip_set_list[index]->id;
++ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
++
++ return res;
++}
++
++/*
++ * Restore sets
++ */
++static int ip_set_restore(void *data,
++ int len)
++{
++ int res = 0;
++ int line = 0, used = 0, members_size;
++ struct ip_set *set;
++ struct ip_set_hash_save *hash_save;
++ struct ip_set_restore *set_restore;
++ ip_set_id_t index;
++
++ /* Loop to restore sets */
++ while (1) {
++ line++;
++
++ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
++ /* Get and ensure header size */
++ if (used + sizeof(struct ip_set_restore) > len)
++ return line;
++ set_restore = (struct ip_set_restore *) (data + used);
++ used += sizeof(struct ip_set_restore);
++
++ /* Ensure data size */
++ if (used
++ + set_restore->header_size
++ + set_restore->members_size > len)
++ return line;
++
++ /* Check marker */
++ if (set_restore->index == IP_SET_INVALID_ID) {
++ line--;
++ goto bindings;
++ }
++
++ /* Try to create the set */
++ DP("restore %s %s", set_restore->name, set_restore->typename);
++ res = ip_set_create(set_restore->name,
++ set_restore->typename,
++ set_restore->index,
++ data + used,
++ set_restore->header_size);
++
++ if (res != 0)
++ return line;
++ used += set_restore->header_size;
++
++ index = ip_set_find_byindex(set_restore->index);
++ DP("index %u, restore_index %u", index, set_restore->index);
++ if (index != set_restore->index)
++ return line;
++ /* Try to restore members data */
++ set = ip_set_list[index];
++ members_size = 0;
++ DP("members_size %u reqsize %u",
++ set_restore->members_size, set->type->reqsize);
++ while (members_size + set->type->reqsize <=
++ set_restore->members_size) {
++ line++;
++ DP("members: %u, line %u", members_size, line);
++ res = __ip_set_addip(index,
++ data + used + members_size,
++ set->type->reqsize);
++ if (!(res == 0 || res == -EEXIST))
++ return line;
++ members_size += set->type->reqsize;
++ }
++
++ DP("members_size %u %u",
++ set_restore->members_size, members_size);
++ if (members_size != set_restore->members_size)
++ return line++;
++ used += set_restore->members_size;
++ }
++
++ bindings:
++ /* Loop to restore bindings */
++ while (used < len) {
++ line++;
++
++ DP("restore binding, line %u", line);
++ /* Get and ensure size */
++ if (used + sizeof(struct ip_set_hash_save) > len)
++ return line;
++ hash_save = (struct ip_set_hash_save *) (data + used);
++ used += sizeof(struct ip_set_hash_save);
++
++ /* hash_save->id is used to store the index */
++ index = ip_set_find_byindex(hash_save->id);
++ DP("restore binding index %u, id %u, %u -> %u",
++ index, hash_save->id, hash_save->ip, hash_save->binding);
++ if (index != hash_save->id)
++ return line;
++ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
++ DP("corrupt binding set index %u", hash_save->binding);
++ return line;
++ }
++ set = ip_set_list[hash_save->id];
++ /* Null valued IP means default binding */
++ if (hash_save->ip)
++ res = ip_set_hash_add(set->id,
++ hash_save->ip,
++ hash_save->binding);
++ else {
++ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
++ write_lock_bh(&ip_set_lock);
++ set->binding = hash_save->binding;
++ __ip_set_get(set->binding);
++ write_unlock_bh(&ip_set_lock);
++ DP("default binding: %u", set->binding);
++ }
++ if (res != 0)
++ return line;
++ }
++ if (used != len)
++ return line;
++
++ return 0;
++}
++
++static int
++ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
++{
++ void *data;
++ int res = 0; /* Assume OK */
++ unsigned *op;
++ struct ip_set_req_adt *req_adt;
++ ip_set_id_t index = IP_SET_INVALID_ID;
++ int (*adtfn)(ip_set_id_t index,
++ const void *data, size_t size);
++ struct fn_table {
++ int (*fn)(ip_set_id_t index,
++ const void *data, size_t size);
++ } adtfn_table[] =
++ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
++ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
++ };
++
++ DP("optval=%d, user=%p, len=%d", optval, user, len);
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (optval != SO_IP_SET)
++ return -EBADF;
++ if (len <= sizeof(unsigned)) {
++ ip_set_printk("short userdata (want >%zu, got %u)",
++ sizeof(unsigned), len);
++ return -EINVAL;
++ }
++ data = vmalloc(len);
++ if (!data) {
++ DP("out of mem for %u bytes", len);
++ return -ENOMEM;
++ }
++ if (copy_from_user(data, user, len) != 0) {
++ res = -EFAULT;
++ goto done;
++ }
++ if (down_interruptible(&ip_set_app_mutex)) {
++ res = -EINTR;
++ goto done;
++ }
++
++ op = (unsigned *)data;
++ DP("op=%x", *op);
++
++ if (*op < IP_SET_OP_VERSION) {
++ /* Check the version at the beginning of operations */
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++ res = -EPROTO;
++ goto done;
++ }
++ }
++
++ switch (*op) {
++ case IP_SET_OP_CREATE:{
++ struct ip_set_req_create *req_create
++ = (struct ip_set_req_create *) data;
++
++ if (len < sizeof(struct ip_set_req_create)) {
++ ip_set_printk("short CREATE data (want >=%zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++ res = ip_set_create(req_create->name,
++ req_create->typename,
++ IP_SET_INVALID_ID,
++ data + sizeof(struct ip_set_req_create),
++ len - sizeof(struct ip_set_req_create));
++ goto done;
++ }
++ case IP_SET_OP_DESTROY:{
++ struct ip_set_req_std *req_destroy
++ = (struct ip_set_req_std *) data;
++
++ if (len != sizeof(struct ip_set_req_std)) {
++ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
++ sizeof(struct ip_set_req_std), len);
++ res = -EINVAL;
++ goto done;
++ }
++ if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
++ /* Destroy all sets */
++ index = IP_SET_INVALID_ID;
++ } else {
++ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_destroy->name);
++
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++
++ res = ip_set_destroy(index);
++ goto done;
++ }
++ case IP_SET_OP_FLUSH:{
++ struct ip_set_req_std *req_flush =
++ (struct ip_set_req_std *) data;
++
++ if (len != sizeof(struct ip_set_req_std)) {
++ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
++ sizeof(struct ip_set_req_std), len);
++ res = -EINVAL;
++ goto done;
++ }
++ if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
++ /* Flush all sets */
++ index = IP_SET_INVALID_ID;
++ } else {
++ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_flush->name);
++
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ res = ip_set_flush(index);
++ goto done;
++ }
++ case IP_SET_OP_RENAME:{
++ struct ip_set_req_create *req_rename
++ = (struct ip_set_req_create *) data;
++
++ if (len != sizeof(struct ip_set_req_create)) {
++ ip_set_printk("invalid RENAME data (want %zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ index = ip_set_find_byname(req_rename->name);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ res = ip_set_rename(index, req_rename->typename);
++ goto done;
++ }
++ case IP_SET_OP_SWAP:{
++ struct ip_set_req_create *req_swap
++ = (struct ip_set_req_create *) data;
++ ip_set_id_t to_index;
++
++ if (len != sizeof(struct ip_set_req_create)) {
++ ip_set_printk("invalid SWAP data (want %zu, got %u)",
++ sizeof(struct ip_set_req_create), len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
++
++ index = ip_set_find_byname(req_swap->name);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ to_index = ip_set_find_byname(req_swap->typename);
++ if (to_index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ res = ip_set_swap(index, to_index);
++ goto done;
++ }
++ default:
++ break; /* Set identified by id */
++ }
++
++ /* Here we may have add/del/test/bind/unbind/test_bind operations */
++ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
++ res = -EBADMSG;
++ goto done;
++ }
++ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
++
++ if (len < sizeof(struct ip_set_req_adt)) {
++ ip_set_printk("short data in adt request (want >=%zu, got %u)",
++ sizeof(struct ip_set_req_adt), len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_adt = (struct ip_set_req_adt *) data;
++
++ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
++ if (!(*op == IP_SET_OP_UNBIND_SET
++ && req_adt->index == IP_SET_INVALID_ID)) {
++ index = ip_set_find_byindex(req_adt->index);
++ if (index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ res = adtfn(index, data, len);
++
++ done:
++ up(&ip_set_app_mutex);
++ vfree(data);
++ if (res > 0)
++ res = 0;
++ DP("final result %d", res);
++ return res;
++}
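++
++/*
++ * Editor's note, not part of the original patch: the set-sockopt path above
++ * dispatches on the leading "unsigned" of the userspace buffer.  Requests
++ * numerically below IP_SET_OP_VERSION also carry the protocol version and
++ * are rejected with -EPROTO on mismatch.  CREATE/DESTROY/FLUSH/RENAME/SWAP
++ * address sets by name; the remaining add/del/test/bind operations resolve
++ * a set index and dispatch through adtfn_table.  A minimal userspace sketch
++ * (sockfd being an AF_INET socket, error handling omitted, and assuming the
++ * request members are named op/version/name as declared in ip_set.h of this
++ * patch):
++ *
++ *  struct ip_set_req_std req;
++ *  memset(&req, 0, sizeof(req));
++ *  req.op = IP_SET_OP_FLUSH;
++ *  req.version = IP_SET_PROTOCOL_VERSION;
++ *  strncpy(req.name, "testset", IP_SET_MAXNAMELEN - 1);
++ *  setsockopt(sockfd, SOL_IP, SO_IP_SET, &req, sizeof(req));
++ */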
++
++static int
++ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
++{
++ int res = 0;
++ unsigned *op;
++ ip_set_id_t index = IP_SET_INVALID_ID;
++ void *data;
++ int copylen = *len;
++
++ DP("optval=%d, user=%p, len=%d", optval, user, *len);
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (optval != SO_IP_SET)
++ return -EBADF;
++ if (*len < sizeof(unsigned)) {
++ ip_set_printk("short userdata (want >=%zu, got %d)",
++ sizeof(unsigned), *len);
++ return -EINVAL;
++ }
++ data = vmalloc(*len);
++ if (!data) {
++ DP("out of mem for %d bytes", *len);
++ return -ENOMEM;
++ }
++ if (copy_from_user(data, user, *len) != 0) {
++ res = -EFAULT;
++ goto done;
++ }
++ if (down_interruptible(&ip_set_app_mutex)) {
++ res = -EINTR;
++ goto done;
++ }
++
++ op = (unsigned *) data;
++ DP("op=%x", *op);
++
++ if (*op < IP_SET_OP_VERSION) {
++  /* Operations below IP_SET_OP_VERSION carry the protocol version: verify it */
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
++ res = -EPROTO;
++ goto done;
++ }
++ }
++
++ switch (*op) {
++ case IP_SET_OP_VERSION: {
++ struct ip_set_req_version *req_version =
++ (struct ip_set_req_version *) data;
++
++ if (*len != sizeof(struct ip_set_req_version)) {
++ ip_set_printk("invalid VERSION (want %zu, got %d)",
++ sizeof(struct ip_set_req_version),
++ *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_version->version = IP_SET_PROTOCOL_VERSION;
++ res = copy_to_user(user, req_version,
++ sizeof(struct ip_set_req_version));
++ goto done;
++ }
++ case IP_SET_OP_GET_BYNAME: {
++ struct ip_set_req_get_set *req_get
++ = (struct ip_set_req_get_set *) data;
++
++ if (*len != sizeof(struct ip_set_req_get_set)) {
++ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
++ sizeof(struct ip_set_req_get_set), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_get->set.name);
++ req_get->set.index = index;
++ goto copy;
++ }
++ case IP_SET_OP_GET_BYINDEX: {
++ struct ip_set_req_get_set *req_get
++ = (struct ip_set_req_get_set *) data;
++
++ if (*len != sizeof(struct ip_set_req_get_set)) {
++ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
++ sizeof(struct ip_set_req_get_set), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byindex(req_get->set.index);
++ strncpy(req_get->set.name,
++ index == IP_SET_INVALID_ID ? ""
++ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
++ goto copy;
++ }
++ case IP_SET_OP_ADT_GET: {
++ struct ip_set_req_adt_get *req_get
++ = (struct ip_set_req_adt_get *) data;
++
++ if (*len != sizeof(struct ip_set_req_adt_get)) {
++ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
++ sizeof(struct ip_set_req_adt_get), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ index = ip_set_find_byname(req_get->set.name);
++ if (index != IP_SET_INVALID_ID) {
++ req_get->set.index = index;
++ strncpy(req_get->typename,
++ ip_set_list[index]->type->typename,
++ IP_SET_MAXNAMELEN - 1);
++ } else {
++ res = -ENOENT;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_MAX_SETS: {
++ struct ip_set_req_max_sets *req_max_sets
++ = (struct ip_set_req_max_sets *) data;
++ ip_set_id_t i;
++
++ if (*len != sizeof(struct ip_set_req_max_sets)) {
++ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
++ sizeof(struct ip_set_req_max_sets), *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
++ req_max_sets->set.index = IP_SET_INVALID_ID;
++ } else {
++ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
++ req_max_sets->set.index =
++ ip_set_find_byname(req_max_sets->set.name);
++ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
++ res = -ENOENT;
++ goto done;
++ }
++ }
++ req_max_sets->max_sets = ip_set_max;
++ req_max_sets->sets = 0;
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] != NULL)
++ req_max_sets->sets++;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_LIST_SIZE:
++ case IP_SET_OP_SAVE_SIZE: {
++ struct ip_set_req_setnames *req_setnames
++ = (struct ip_set_req_setnames *) data;
++ struct ip_set_name_list *name_list;
++ struct ip_set *set;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_setnames)) {
++ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_setnames), *len);
++ res = -EINVAL;
++ goto done;
++ }
++
++ req_setnames->size = 0;
++ used = sizeof(struct ip_set_req_setnames);
++ for (i = 0; i < ip_set_max; i++) {
++ if (ip_set_list[i] == NULL)
++ continue;
++ name_list = (struct ip_set_name_list *)
++ (data + used);
++ used += sizeof(struct ip_set_name_list);
++ if (used > copylen) {
++ res = -EAGAIN;
++ goto done;
++ }
++ set = ip_set_list[i];
++ /* Fill in index, name, etc. */
++ name_list->index = i;
++ name_list->id = set->id;
++ strncpy(name_list->name,
++ set->name,
++ IP_SET_MAXNAMELEN - 1);
++ strncpy(name_list->typename,
++ set->type->typename,
++ IP_SET_MAXNAMELEN - 1);
++ DP("filled %s of type %s, index %u\n",
++ name_list->name, name_list->typename,
++ name_list->index);
++ if (!(req_setnames->index == IP_SET_INVALID_ID
++ || req_setnames->index == i))
++ continue;
++ /* Update size */
++ switch (*op) {
++ case IP_SET_OP_LIST_SIZE: {
++ req_setnames->size += sizeof(struct ip_set_list)
++ + set->type->header_size
++ + set->type->list_members_size(set);
++ /* Sets are identified by id in the hash */
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ set->id, &req_setnames->size);
++ break;
++ }
++ case IP_SET_OP_SAVE_SIZE: {
++ req_setnames->size += sizeof(struct ip_set_save)
++ + set->type->header_size
++ + set->type->list_members_size(set);
++ FOREACH_HASH_DO(__set_hash_bindings_size_save,
++ set->id, &req_setnames->size);
++ break;
++ }
++ default:
++ break;
++ }
++ }
++ if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_LIST: {
++ struct ip_set_req_list *req_list
++ = (struct ip_set_req_list *) data;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_list)) {
++ ip_set_printk("short LIST (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_list), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ index = req_list->index;
++ if (index != IP_SET_INVALID_ID
++ && ip_set_find_byindex(index) != index) {
++ res = -ENOENT;
++ goto done;
++ }
++ used = 0;
++ if (index == IP_SET_INVALID_ID) {
++ /* List all sets */
++ for (i = 0; i < ip_set_max && res == 0; i++) {
++ if (ip_set_list[i] != NULL)
++ res = ip_set_list_set(i, data, &used, *len);
++ }
++ } else {
++ /* List an individual set */
++ res = ip_set_list_set(index, data, &used, *len);
++ }
++ if (res != 0)
++ goto done;
++ else if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_SAVE: {
++ struct ip_set_req_list *req_save
++ = (struct ip_set_req_list *) data;
++ ip_set_id_t i;
++ int used;
++
++ if (*len < sizeof(struct ip_set_req_list)) {
++ ip_set_printk("short SAVE (want >=%zu, got %d)",
++ sizeof(struct ip_set_req_list), *len);
++ res = -EINVAL;
++ goto done;
++ }
++ index = req_save->index;
++ if (index != IP_SET_INVALID_ID
++ && ip_set_find_byindex(index) != index) {
++ res = -ENOENT;
++ goto done;
++ }
++ used = 0;
++ if (index == IP_SET_INVALID_ID) {
++ /* Save all sets */
++ for (i = 0; i < ip_set_max && res == 0; i++) {
++ if (ip_set_list[i] != NULL)
++ res = ip_set_save_set(i, data, &used, *len);
++ }
++ } else {
++ /* Save an individual set */
++ res = ip_set_save_set(index, data, &used, *len);
++ }
++ if (res == 0)
++ res = ip_set_save_bindings(index, data, &used, *len);
++
++ if (res != 0)
++ goto done;
++ else if (copylen != used) {
++ res = -EAGAIN;
++ goto done;
++ }
++ goto copy;
++ }
++ case IP_SET_OP_RESTORE: {
++ struct ip_set_req_setnames *req_restore
++ = (struct ip_set_req_setnames *) data;
++ int line;
++
++ if (*len < sizeof(struct ip_set_req_setnames)
++ || *len != req_restore->size) {
++ ip_set_printk("invalid RESTORE (want =%zu, got %d)",
++ req_restore->size, *len);
++ res = -EINVAL;
++ goto done;
++ }
++ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
++ req_restore->size - sizeof(struct ip_set_req_setnames));
++ DP("ip_set_restore: %u", line);
++ if (line != 0) {
++ res = -EAGAIN;
++ req_restore->size = line;
++ copylen = sizeof(struct ip_set_req_setnames);
++ goto copy;
++ }
++ goto done;
++ }
++ default:
++ res = -EBADMSG;
++ goto done;
++ } /* end of switch(op) */
++
++ copy:
++ DP("set %s, copylen %u", index != IP_SET_INVALID_ID
++ && ip_set_list[index]
++ ? ip_set_list[index]->name
++ : ":all:", copylen);
++ res = copy_to_user(user, data, copylen);
++
++ done:
++ up(&ip_set_app_mutex);
++ vfree(data);
++ if (res > 0)
++ res = 0;
++ DP("final result %d", res);
++ return res;
++}
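++
++/*
++ * Editor's note, not part of the original patch: LIST and SAVE in the
++ * get-sockopt path above are two-pass operations.  Userspace first issues
++ * IP_SET_OP_LIST_SIZE/IP_SET_OP_SAVE_SIZE to learn the required buffer
++ * size, then repeats the call as IP_SET_OP_LIST/IP_SET_OP_SAVE with a
++ * buffer of exactly that size; if the sets changed in between and the
++ * sizes no longer match, -EAGAIN tells userspace to start over.
++ */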
++
++static struct nf_sockopt_ops so_set = {
++ .pf = PF_INET,
++ .set_optmin = SO_IP_SET,
++ .set_optmax = SO_IP_SET + 1,
++ .set = &ip_set_sockfn_set,
++ .get_optmin = SO_IP_SET,
++ .get_optmax = SO_IP_SET + 1,
++ .get = &ip_set_sockfn_get,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ .owner = THIS_MODULE,
++#endif
++};
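++
++/* Editor's note, not part of the original patch: set_optmax/get_optmax are
++ * exclusive bounds, so the registration above claims exactly one socket
++ * option number, SO_IP_SET, for both the set and the get direction. */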
++
++static int max_sets, hash_size;
++module_param(max_sets, int, 0600);
++MODULE_PARM_DESC(max_sets, "maximal number of sets");
++module_param(hash_size, int, 0600);
++MODULE_PARM_DESC(hash_size, "hash size for bindings");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("module implementing core IP set support");
++
++static int __init ip_set_init(void)
++{
++ int res;
++ ip_set_id_t i;
++
++ get_random_bytes(&ip_set_hash_random, 4);
++ if (max_sets)
++ ip_set_max = max_sets;
++ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
++ if (!ip_set_list) {
++ printk(KERN_ERR "Unable to create ip_set_list\n");
++ return -ENOMEM;
++ }
++ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
++ if (hash_size)
++ ip_set_bindings_hash_size = hash_size;
++ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
++ if (!ip_set_hash) {
++ printk(KERN_ERR "Unable to create ip_set_hash\n");
++ vfree(ip_set_list);
++ return -ENOMEM;
++ }
++ for (i = 0; i < ip_set_bindings_hash_size; i++)
++ INIT_LIST_HEAD(&ip_set_hash[i]);
++
++ INIT_LIST_HEAD(&set_type_list);
++
++ res = nf_register_sockopt(&so_set);
++ if (res != 0) {
++ ip_set_printk("SO_SET registry failed: %d", res);
++ vfree(ip_set_list);
++ vfree(ip_set_hash);
++ return res;
++ }
++ return 0;
++}
++
++static void __exit ip_set_fini(void)
++{
++ /* There can't be any existing set or binding */
++ nf_unregister_sockopt(&so_set);
++ vfree(ip_set_list);
++ vfree(ip_set_hash);
++ DP("these are the famous last words");
++}
++
++EXPORT_SYMBOL(ip_set_register_set_type);
++EXPORT_SYMBOL(ip_set_unregister_set_type);
++
++EXPORT_SYMBOL(ip_set_get_byname);
++EXPORT_SYMBOL(ip_set_get_byindex);
++EXPORT_SYMBOL(ip_set_put);
++
++EXPORT_SYMBOL(ip_set_addip_kernel);
++EXPORT_SYMBOL(ip_set_delip_kernel);
++EXPORT_SYMBOL(ip_set_testip_kernel);
++
++module_init(ip_set_init);
++module_exit(ip_set_fini);
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_iphash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,429 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_iphash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
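++/*
++ * Editor's note, not part of the original patch: members[] is a bounded
++ * open-addressing hash.  Each set keeps map->probes independent hash
++ * functions, obtained by seeding jhash_1word() with the per-probe random
++ * words in initval[].  An element is stored in the first free slot among
++ * its probe candidates and lookups must try every probe; when all candidate
++ * slots of a new element are occupied, __addip() returns -EAGAIN, which
++ * presumably lets the ip_set core trigger retry() below to grow the table.
++ */
++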
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = ip & map->netmask;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++  /* No early exit when testing: deleted entries leave
++   * holes behind, so every probe slot must be checked. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == *hash_ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = *hash_ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __addip((struct ip_set_iphash *) set->data,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t hash_ip, *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_iphash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new hash size */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_iphash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iphash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ tmp->netmask = map->netmask;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_iphash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __addip(tmp, *elem, &hash_ip);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
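++
++/*
++ * Editor's note, not part of the original patch: each pass of retry() above
++ * grows the table by map->resize percent (and by at least one slot), e.g.
++ * with hashsize 1024 and resize 50 the table grows 1024 -> 1536 -> 2304 ->
++ * 3456 and so on, until every stored value can be re-inserted without all
++ * of its probe slots colliding.
++ */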
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t id, *elem;
++
++ if (!ip)
++ return -ERANGE;
++
++ id = hash_id(set, ip, hash_ip);
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iphash *req =
++ (struct ip_set_req_iphash *) data;
++
++ if (size != sizeof(struct ip_set_req_iphash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iphash_create *req =
++ (struct ip_set_req_iphash_create *) data;
++ struct ip_set_iphash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_iphash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iphash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_iphash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iphash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ map->netmask = req->netmask;
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ struct ip_set_req_iphash_create *header =
++ (struct ip_set_req_iphash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++ header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_iphash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_iphash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iphash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iphash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_iphash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_iphash);
++}
++
++static void __exit ip_set_iphash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_iphash);
++}
++
++module_init(ip_set_iphash_init);
++module_exit(ip_set_iphash_fini);
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_ipmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,336 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the single bitmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_ipmap.h>
++
++static inline ip_set_ip_t
++ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
++{
++ return (ip - map->first_ip)/map->hosts;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
++ return -EEXIST;
++
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ DP("%u.%u.%u.%u", HIPQUAD(req->ip));
++ return __addip(set, req->ip, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __addip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = ip & map->netmask;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
++ return -EEXIST;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipmap *req =
++ (struct ip_set_req_ipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_ipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
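++/*
++ * Editor's note, not part of the original patch: a worked example for the
++ * netmask arithmetic in create() below.  For from=192.168.0.0,
++ * to=192.168.3.255 and netmask=255.255.255.0, range_to_mask() yields the
++ * covering /22 (mask_bits=22) and netmask_bits=24, so hosts = 2^(32-24) =
++ * 256 addresses per /24 block and sizeid = 2^(24-22) = 4 bits in the
++ * bitmap.  An address such as 192.168.2.57 is masked to 192.168.2.0 and
++ * mapped by ip_to_id() to bit (2*256)/256 = 2.  With netmask
++ * 255.255.255.255 the set degenerates to one bit per address in [from,to].
++ */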
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_ipmap_create *req =
++ (struct ip_set_req_ipmap_create *) data;
++ struct ip_set_ipmap *map;
++
++ if (size != sizeof(struct ip_set_req_ipmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++ HIPQUAD(req->from), HIPQUAD(req->to));
++
++ if (req->from > req->to) {
++ DP("bad ip range");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipmap));
++ return -ENOMEM;
++ }
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ map->netmask = req->netmask;
++
++ if (req->netmask == 0xFFFFFFFF) {
++ map->hosts = 1;
++ map->sizeid = map->last_ip - map->first_ip + 1;
++ } else {
++ unsigned int mask_bits, netmask_bits;
++ ip_set_ip_t mask;
++
++  map->first_ip &= map->netmask; /* silently align; should we complain instead? */
++
++ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
++ netmask_bits = mask_to_bits(map->netmask);
++
++ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
++ || netmask_bits <= mask_bits)
++ return -ENOEXEC;
++
++ DP("mask_bits %u, netmask_bits %u",
++ mask_bits, netmask_bits);
++ map->hosts = 2 << (32 - netmask_bits - 1);
++ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
++ }
++ if (map->sizeid > MAX_RANGE + 1) {
++ ip_set_printk("range too big (max %d addresses)",
++ MAX_RANGE+1);
++ kfree(map);
++ return -ENOEXEC;
++ }
++ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
++ newbytes = bitmap_bytes(0, map->sizeid - 1);
++ map->members = kmalloc(newbytes, GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ kfree(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ struct ip_set_req_ipmap_create *header =
++ (struct ip_set_req_ipmap_create *) data;
++
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++ header->netmask = map->netmask;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++
++ return bitmap_bytes(0, map->sizeid - 1);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
++ int bytes = bitmap_bytes(0, map->sizeid - 1);
++
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_ipmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_ipmap),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_ipmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipmap type of IP sets");
++
++static int __init ip_set_ipmap_init(void)
++{
++ return ip_set_register_set_type(&ip_set_ipmap);
++}
++
++static void __exit ip_set_ipmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_ipmap);
++}
++
++module_init(ip_set_ipmap_init);
++module_exit(ip_set_ipmap_fini);
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_ipporthash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,581 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an ip+port hash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
++
++static int limit = MAX_RANGE;
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ struct iphdr *iph = ip_hdr(skb);
++#else
++ struct iphdr *iph = skb->nh.iph;
++#endif
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
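++
++/* Editor's note, not part of the original patch: get_port() above inspects
++ * TCP and UDP only, and only the first fragment of a packet; anything else
++ * yields INVALID_PORT, so such traffic can never match (or be added/deleted)
++ * through the kernel hooks of this set type. */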
++
++static inline __u32
++jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
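++/* Editor's note, not part of the original patch: HASH_IP() packs the offset
++ * of the address from first_ip into the upper 16 bits and the port into the
++ * lower 16 bits of a single ip_set_ip_t, so the encoding is unambiguous only
++ * while the from..to range spans at most 2^16 addresses. */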
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map =
++ (struct ip_set_ipporthash *) set->data;
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = HASH_IP(map, ip, port);
++ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++  /* No early exit when testing: deleted entries leave
++   * holes behind, so every probe slot must be checked. */
++ }
++ return UINT_MAX;
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++ int res;
++
++ if (flags[index+1] == 0)
++ return 0;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return 0;
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++ return (res < 0 ? 0 : res);
++
++}
++
++static inline int
++__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, hash_ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == hash_ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = hash_ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ if (map->elements > limit)
++ return -ERANGE;
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = HASH_IP(map, ip, port);
++
++ return __add_haship(map, *hash_ip);
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __addip((struct ip_set_ipporthash *) set->data,
++ req->ip, req->port, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++
++ if (flags[index+1] == 0)
++ return -EINVAL;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __addip((struct ip_set_ipporthash *) set->data,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_ipporthash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new hash size */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipporthash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ tmp->first_ip = map->first_ip;
++ tmp->last_ip = map->last_ip;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __add_haship(tmp, *elem);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t id;
++ ip_set_ip_t *elem;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++
++ id = hash_id(set, ip, port, hash_ip);
++
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_ipporthash *req =
++ (struct ip_set_req_ipporthash *) data;
++
++ if (size != sizeof(struct ip_set_req_ipporthash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, req->port, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port;
++
++ if (flags[index+1] == 0)
++ return -EINVAL;
++
++ port = get_port(skb, flags[index+1]);
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ port,
++ hash_ip);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_ipporthash_create *req =
++ (struct ip_set_req_ipporthash_create *) data;
++ struct ip_set_ipporthash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_ipporthash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_ipporthash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_ipporthash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_ipporthash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ struct ip_set_req_ipporthash_create *header =
++ (struct ip_set_req_ipporthash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_ipporthash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_ipporthash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_ipporthash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("ipporthash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_ipporthash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_ipporthash);
++}
++
++static void __exit ip_set_ipporthash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_ipporthash);
++}
++
++module_init(ip_set_ipporthash_init);
++module_exit(ip_set_ipporthash_fini);
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_iptree.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,612 @@
++/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the iptree type */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++/* Backward compatibility */
++#ifndef __nocast
++#define __nocast
++#endif
++
++#include <linux/netfilter_ipv4/ip_set_iptree.h>
++
++static int limit = MAX_RANGE;
++
++/* Garbage collection interval in seconds: */
++#define IPTREE_GC_TIME 5*60
++/* Sleep this many milliseconds before trying again
++ * to delete the gc timer when destroying/flushing a set */
++#define IPTREE_DESTROY_SLEEP 100
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *branch_cachep;
++static struct kmem_cache *leaf_cachep;
++#else
++static kmem_cache_t *branch_cachep;
++static kmem_cache_t *leaf_cachep;
++#endif
++
++#if defined(__LITTLE_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[3]; \
++ b = ((unsigned char *)addrp)[2]; \
++ c = ((unsigned char *)addrp)[1]; \
++ d = ((unsigned char *)addrp)[0]; \
++} while (0)
++#elif defined(__BIG_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[0]; \
++ b = ((unsigned char *)addrp)[1]; \
++ c = ((unsigned char *)addrp)[2]; \
++ d = ((unsigned char *)addrp)[3]; \
++} while (0)
++#else
++#error "Please fix asm/byteorder.h"
++#endif /* __LITTLE_ENDIAN */
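++
++/*
++ * Editor's note, not part of the original patch: an iptree set is a
++ * 4-level, 256-way trie indexed by the four octets of the host-order
++ * address as extracted by ABCD() above.  The branch levels
++ * (ip_set_iptreeb/ip_set_iptreec) hold child pointers and the leaf level
++ * (ip_set_iptreed) holds one expiry timestamp per final octet: 0 means
++ * "not in the set", any non-zero jiffies value means present (and, for
++ * sets created with a timeout, when the entry expires).
++ */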
++
++#define TESTIP_WALK(map, elem, branch) do { \
++ if ((map)->tree[elem]) { \
++ branch = (map)->tree[elem]; \
++ } else \
++ return 0; \
++} while (0)
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++
++ if (!ip)
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
++ TESTIP_WALK(map, a, btree);
++ TESTIP_WALK(btree, b, ctree);
++ TESTIP_WALK(ctree, c, dtree);
++ DP("%lu %lu", dtree->expires[d], jiffies);
++ return dtree->expires[d]
++ && (!map->timeout
++ || time_after(dtree->expires[d], jiffies));
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ return __testip(set, req->ip, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res;
++
++ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
++ flags[index] & IPSET_SRC ? "SRC" : "DST",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ NIPQUAD(ip_hdr(skb)->saddr),
++ NIPQUAD(ip_hdr(skb)->daddr));
++#else
++ NIPQUAD(skb->nh.iph->saddr),
++ NIPQUAD(skb->nh.iph->daddr));
++#endif
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++ return (res < 0 ? 0 : res);
++}
++
++#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
++ if ((map)->tree[elem]) { \
++ DP("found %u", elem); \
++ branch = (map)->tree[elem]; \
++ } else { \
++ branch = (type *) \
++ kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (branch == NULL) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[elem] = branch; \
++ DP("alloc %u", elem); \
++ } \
++} while (0)
++
++static inline int
++__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++ int ret = 0;
++
++ if (!ip || map->elements >= limit)
++ /* We could call the garbage collector
++ * but it's probably overkill */
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
++ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
++ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
++ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++ ret = -EEXIST;
++ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
++ /* expires == 0 means "not in the set": bump a wrapped timestamp to 1 */
++ if (dtree->expires[d] == 0)
++ dtree->expires[d] = 1;
++ DP("%u %lu", d, dtree->expires[d]);
++ if (ret == 0)
++ map->elements++;
++ return ret;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
++ return __addip(set, req->ip,
++ req->timeout ? req->timeout : map->timeout,
++ hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ return __addip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ map->timeout,
++ hash_ip);
++}
++
++#define DELIP_WALK(map, elem, branch) do { \
++ if ((map)->tree[elem]) { \
++ branch = (map)->tree[elem]; \
++ } else \
++ return -EEXIST; \
++} while (0)
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned char a,b,c,d;
++
++ if (!ip)
++ return -ERANGE;
++
++ *hash_ip = ip;
++ ABCD(a, b, c, d, hash_ip);
++ DELIP_WALK(map, a, btree);
++ DELIP_WALK(btree, b, ctree);
++ DELIP_WALK(ctree, c, dtree);
++
++ if (dtree->expires[d]) {
++ dtree->expires[d] = 0;
++ map->elements--;
++ return 0;
++ }
++ return -EEXIST;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptree *req =
++ (struct ip_set_req_iptree *) data;
++
++ if (size != sizeof(struct ip_set_req_iptree)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++ for (i = 0; i < 256; i++) { \
++ if (!(map)->tree[i]) \
++ continue; \
++ branch = (map)->tree[i]
++
++#define LOOP_WALK_END }
++
++static void ip_tree_gc(unsigned long ul_set)
++{
++ struct ip_set *set = (void *) ul_set;
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ unsigned char i,j,k;
++
++ i = j = k = 0;
++ DP("gc: %s", set->name);
++ write_lock_bh(&set->lock);
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]) {
++ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
++ a, b, c, d,
++ dtree->expires[d], jiffies);
++ if (map->timeout
++ && time_before(dtree->expires[d], jiffies)) {
++ dtree->expires[d] = 0;
++ map->elements--;
++ } else
++ k = 1;
++ }
++ }
++ if (k == 0) {
++ DP("gc: %s: leaf %u %u %u empty",
++ set->name, a, b, c);
++ kmem_cache_free(leaf_cachep, dtree);
++ ctree->tree[c] = NULL;
++ } else {
++ DP("gc: %s: leaf %u %u %u not empty",
++ set->name, a, b, c);
++ j = 1;
++ k = 0;
++ }
++ LOOP_WALK_END;
++ if (j == 0) {
++ DP("gc: %s: branch %u %u empty",
++ set->name, a, b);
++ kmem_cache_free(branch_cachep, ctree);
++ btree->tree[b] = NULL;
++ } else {
++ DP("gc: %s: branch %u %u not empty",
++ set->name, a, b);
++ i = 1;
++ j = k = 0;
++ }
++ LOOP_WALK_END;
++ if (i == 0) {
++ DP("gc: %s: branch %u empty",
++ set->name, a);
++ kmem_cache_free(branch_cachep, btree);
++ map->tree[a] = NULL;
++ } else {
++ DP("gc: %s: branch %u not empty",
++ set->name, a);
++ i = j = k = 0;
++ }
++ LOOP_WALK_END;
++ write_unlock_bh(&set->lock);
++
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static inline void init_gc_timer(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ /* Even if there is no timeout for the entries,
++ * we still have to call gc because delete
++	 * does not clean up empty branches */
++ map->gc_interval = IPTREE_GC_TIME;
++ init_timer(&map->gc);
++ map->gc.data = (unsigned long) set;
++ map->gc.function = ip_tree_gc;
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iptree_create *req =
++ (struct ip_set_req_iptree_create *) data;
++ struct ip_set_iptree *map;
++
++ if (size != sizeof(struct ip_set_req_iptree_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_iptree_create),
++ size);
++ return -EINVAL;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_iptree));
++ return -ENOMEM;
++ }
++ memset(map, 0, sizeof(*map));
++ map->timeout = req->timeout;
++ map->elements = 0;
++ set->data = map;
++
++ init_gc_timer(set);
++
++ return 0;
++}
++
++static void __flush(struct ip_set_iptree *map)
++{
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ kmem_cache_free(leaf_cachep, dtree);
++ LOOP_WALK_END;
++ kmem_cache_free(branch_cachep, ctree);
++ LOOP_WALK_END;
++ kmem_cache_free(branch_cachep, btree);
++ LOOP_WALK_END;
++ map->elements = 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++
++ /* gc might be running */
++ while (!del_timer(&map->gc))
++ msleep(IPTREE_DESTROY_SLEEP);
++ __flush(map);
++ kfree(map);
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ unsigned int timeout = map->timeout;
++
++ /* gc might be running */
++ while (!del_timer(&map->gc))
++ msleep(IPTREE_DESTROY_SLEEP);
++ __flush(map);
++ memset(map, 0, sizeof(*map));
++ map->timeout = timeout;
++
++ init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_req_iptree_create *header =
++ (struct ip_set_req_iptree_create *) data;
++
++ header->timeout = map->timeout;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ unsigned int count = 0;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
++ count++;
++ }
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++
++ DP("members %u", count);
++ return (count * sizeof(struct ip_set_req_iptree));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
++ struct ip_set_iptreeb *btree;
++ struct ip_set_iptreec *ctree;
++ struct ip_set_iptreed *dtree;
++ unsigned int a,b,c,d;
++ size_t offset = 0;
++ struct ip_set_req_iptree *entry;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ LOOP_WALK_BEGIN(btree, b, ctree);
++ LOOP_WALK_BEGIN(ctree, c, dtree);
++ for (d = 0; d < 256; d++) {
++ if (dtree->expires[d]
++ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
++ entry = (struct ip_set_req_iptree *)(data + offset);
++ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
++ entry->timeout = !map->timeout ? 0
++ : (dtree->expires[d] - jiffies)/HZ;
++ offset += sizeof(struct ip_set_req_iptree);
++ }
++ }
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++ LOOP_WALK_END;
++}
++
++static struct ip_set_type ip_set_iptree = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_iptree),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iptree_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptree type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_iptree_init(void)
++{
++ int ret;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ branch_cachep = kmem_cache_create("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb),
++ 0, 0, NULL);
++#else
++ branch_cachep = kmem_cache_create("ip_set_iptreeb",
++ sizeof(struct ip_set_iptreeb),
++ 0, 0, NULL, NULL);
++#endif
++ if (!branch_cachep) {
++ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ leaf_cachep = kmem_cache_create("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed),
++ 0, 0, NULL);
++#else
++ leaf_cachep = kmem_cache_create("ip_set_iptreed",
++ sizeof(struct ip_set_iptreed),
++ 0, 0, NULL, NULL);
++#endif
++ if (!leaf_cachep) {
++ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
++ ret = -ENOMEM;
++ goto free_branch;
++ }
++ ret = ip_set_register_set_type(&ip_set_iptree);
++ if (ret == 0)
++ goto out;
++
++ kmem_cache_destroy(leaf_cachep);
++ free_branch:
++ kmem_cache_destroy(branch_cachep);
++ out:
++ return ret;
++}
++
++static void __exit ip_set_iptree_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_iptree);
++ kmem_cache_destroy(leaf_cachep);
++ kmem_cache_destroy(branch_cachep);
++}
++
++module_init(ip_set_iptree_init);
++module_exit(ip_set_iptree_fini);
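A minimal userspace sketch of the expiry convention the iptree type above relies on: expires[d] == 0 marks an empty slot, sets created without a timeout store the constant 1, and otherwise the slot holds a jiffies deadline (bumped to 1 if it happens to wrap to 0). The jiffies stand-in and the entry_valid() helper below are illustrative assumptions, not code from the patch.

#include <stdio.h>

#define HZ 100
static unsigned long jiffies;               /* stand-in for the kernel tick counter */

/* Mirrors the liveness test used in __addip() and list_members():
 * non-zero slot, and either no set timeout or a deadline still ahead. */
static int entry_valid(unsigned long expires, unsigned int set_timeout)
{
        if (!expires)
                return 0;
        if (!set_timeout)
                return 1;
        return (long)(expires - jiffies) > 0;   /* like time_after(expires, jiffies) */
}

int main(void)
{
        unsigned long e = 30 * HZ + jiffies;    /* stored as __addip() does */

        jiffies += 10 * HZ;
        printf("after 10s: %d\n", entry_valid(e, 1));
        jiffies += 25 * HZ;
        printf("after 35s: %d\n", entry_valid(e, 1));
        return 0;
}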
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_iptreemap.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_iptreemap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,829 @@
++/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published by
++ * the Free Software Foundation.
++ */
++
++/* This module implements the iptreemap ipset type. It uses bitmaps to
++ * represent every single IPv4 address as a single bit. The bitmaps are managed
++ * in a tree structure, where the first three octets of an address are used
++ * as an index to find the bitmap and the last octet is used as the bit number.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
++
++#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
++#define IPTREEMAP_DESTROY_SLEEP (100)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++static struct kmem_cache *cachep_b;
++static struct kmem_cache *cachep_c;
++static struct kmem_cache *cachep_d;
++#else
++static kmem_cache_t *cachep_b;
++static kmem_cache_t *cachep_c;
++static kmem_cache_t *cachep_d;
++#endif
++
++static struct ip_set_iptreemap_d *fullbitmap_d;
++static struct ip_set_iptreemap_c *fullbitmap_c;
++static struct ip_set_iptreemap_b *fullbitmap_b;
++
++#if defined(__LITTLE_ENDIAN)
++#define ABCD(a, b, c, d, addr) \
++ do { \
++ a = ((unsigned char *)addr)[3]; \
++ b = ((unsigned char *)addr)[2]; \
++ c = ((unsigned char *)addr)[1]; \
++ d = ((unsigned char *)addr)[0]; \
++ } while (0)
++#elif defined(__BIG_ENDIAN)
++#define ABCD(a,b,c,d,addrp) do { \
++ a = ((unsigned char *)addrp)[0]; \
++ b = ((unsigned char *)addrp)[1]; \
++ c = ((unsigned char *)addrp)[2]; \
++ d = ((unsigned char *)addrp)[3]; \
++} while (0)
++#else
++#error "Please fix asm/byteorder.h"
++#endif /* __LITTLE_ENDIAN */
++
++#define TESTIP_WALK(map, elem, branch, full) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) \
++ return 0; \
++ else if (branch == full) \
++ return 1; \
++ } while (0)
++
++#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) { \
++ branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (!branch) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[elem] = branch; \
++ } else if (branch == full) { \
++ return -EEXIST; \
++ } \
++ } while (0)
++
++#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
++ for (a = a1; a <= a2; a++) { \
++ branch = (map)->tree[a]; \
++ if (branch != full) { \
++ if ((a > a1 && a < a2) || (hint)) { \
++ if (branch) \
++ free(branch); \
++ (map)->tree[a] = full; \
++ continue; \
++ } else if (!branch) { \
++ branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
++ if (!branch) \
++ return -ENOMEM; \
++ memset(branch, 0, sizeof(*branch)); \
++ (map)->tree[a] = branch; \
++ }
++
++#define ADDIP_RANGE_LOOP_END() \
++ } \
++ }
++
++#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
++ do { \
++ branch = (map)->tree[elem]; \
++ if (!branch) { \
++ return -EEXIST; \
++ } else if (branch == full) { \
++ branch = kmem_cache_alloc(cachep, flags); \
++ if (!branch) \
++ return -ENOMEM; \
++ memcpy(branch, full, sizeof(*full)); \
++ (map)->tree[elem] = branch; \
++ } \
++ } while (0)
++
++#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
++ for (a = a1; a <= a2; a++) { \
++ branch = (map)->tree[a]; \
++ if (branch) { \
++ if ((a > a1 && a < a2) || (hint)) { \
++ if (branch != full) \
++ free(branch); \
++ (map)->tree[a] = NULL; \
++ continue; \
++ } else if (branch == full) { \
++ branch = kmem_cache_alloc(cachep, flags); \
++ if (!branch) \
++ return -ENOMEM; \
++ memcpy(branch, full, sizeof(*branch)); \
++ (map)->tree[a] = branch; \
++ }
++
++#define DELIP_RANGE_LOOP_END() \
++ } \
++ }
++
++#define LOOP_WALK_BEGIN(map, i, branch) \
++ for (i = 0; i < 256; i++) { \
++ branch = (map)->tree[i]; \
++ if (likely(!branch)) \
++ continue;
++
++#define LOOP_WALK_END() \
++ }
++
++#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
++ count = -256; \
++ for (i = 0; i < 256; i++) { \
++ branch = (map)->tree[i]; \
++ if (likely(!branch)) \
++ continue; \
++ count++; \
++ if (branch == full) { \
++ count++; \
++ continue; \
++ }
++
++#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
++ if (-256 == count) { \
++ kmem_cache_free(cachep, branch); \
++ (map)->tree[i] = NULL; \
++ } else if (256 == count) { \
++ kmem_cache_free(cachep, branch); \
++ (map)->tree[i] = full; \
++ } \
++ }
++
++#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
++ for (i = 0; i < 256; i++) { \
++ if (!(map)->tree[i]) { \
++ if (inrange) { \
++ count++; \
++ inrange = 0; \
++ } \
++ continue; \
++ } \
++ branch = (map)->tree[i];
++
++#define LOOP_WALK_END_COUNT() \
++ }
++
++#define MIN(a, b) (a < b ? a : b)
++#define MAX(a, b) (a > b ? a : b)
++
++#define GETVALUE1(a, a1, b1, r) \
++ (a == a1 ? b1 : r)
++
++#define GETVALUE2(a, b, a1, b1, c1, r) \
++ (a == a1 && b == b1 ? c1 : r)
++
++#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
++ (a == a1 && b == b1 && c == c1 ? d1 : r)
++
++#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE1(a, a1, b1, 0) == 0 \
++ && GETVALUE1(a, a2, b2, 255) == 255 \
++ && c1 == 0 \
++ && c2 == 255 \
++ && d1 == 0 \
++ && d2 == 255 \
++ )
++
++#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
++ && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
++ && d1 == 0 \
++ && d2 == 255 \
++ )
++
++#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
++ ( \
++ GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
++ && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
++ )
++
++
++static inline void
++free_d(struct ip_set_iptreemap_d *map)
++{
++ kmem_cache_free(cachep_d, map);
++}
++
++static inline void
++free_c(struct ip_set_iptreemap_c *map)
++{
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int i;
++
++ LOOP_WALK_BEGIN(map, i, dtree) {
++ if (dtree != fullbitmap_d)
++ free_d(dtree);
++ } LOOP_WALK_END();
++
++ kmem_cache_free(cachep_c, map);
++}
++
++static inline void
++free_b(struct ip_set_iptreemap_b *map)
++{
++ struct ip_set_iptreemap_c *ctree;
++ unsigned int i;
++
++ LOOP_WALK_BEGIN(map, i, ctree) {
++ if (ctree != fullbitmap_c)
++ free_c(ctree);
++ } LOOP_WALK_END();
++
++ kmem_cache_free(cachep_b, map);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a, b, c, d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ TESTIP_WALK(map, a, btree, fullbitmap_b);
++ TESTIP_WALK(btree, b, ctree, fullbitmap_c);
++ TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
++
++ return !!test_bit(d, (void *) dtree->bitmap);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __testip(set, req->start, hash_ip);
++}
++
++static int
++testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++ int res;
++
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a, b, c, d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
++ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
++ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
++
++ if (test_and_set_bit(d, (void *) dtree->bitmap))
++ return -EEXIST;
++
++ set_bit(b, (void *) btree->dirty);
++
++ return 0;
++}
++
++static inline int
++__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d;
++ unsigned char a1, b1, c1, d1;
++ unsigned char a2, b2, c2, d2;
++
++ if (start == end)
++ return __addip_single(set, start, hash_ip);
++
++ *hash_ip = start;
++
++ ABCD(a1, b1, c1, d1, &start);
++ ABCD(a2, b2, c2, d2, &end);
++
++ /* This is sooo ugly... */
++ ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
++ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
++ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
++ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
++ set_bit(d, (void *) dtree->bitmap);
++ set_bit(b, (void *) btree->dirty);
++ } ADDIP_RANGE_LOOP_END();
++ } ADDIP_RANGE_LOOP_END();
++ } ADDIP_RANGE_LOOP_END();
++
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++
++ return __addip_single(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned char a,b,c,d;
++
++ *hash_ip = ip;
++
++ ABCD(a, b, c, d, hash_ip);
++
++ DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
++ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
++ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
++
++ if (!test_and_clear_bit(d, (void *) dtree->bitmap))
++ return -EEXIST;
++
++ set_bit(b, (void *) btree->dirty);
++
++ return 0;
++}
++
++static inline int
++__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d;
++ unsigned char a1, b1, c1, d1;
++ unsigned char a2, b2, c2, d2;
++
++ if (start == end)
++ return __delip_single(set, start, hash_ip, flags);
++
++ *hash_ip = start;
++
++ ABCD(a1, b1, c1, d1, &start);
++ ABCD(a2, b2, c2, d2, &end);
++
++ /* This is sooo ugly... */
++ DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
++ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
++ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
++ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
++ clear_bit(d, (void *) dtree->bitmap);
++ set_bit(b, (void *) btree->dirty);
++ } DELIP_RANGE_LOOP_END();
++ } DELIP_RANGE_LOOP_END();
++ } DELIP_RANGE_LOOP_END();
++
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
++
++ if (size != sizeof(struct ip_set_req_iptreemap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
++ return -EINVAL;
++ }
++
++ return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
++}
++
++static int
++delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
++{
++ return __delip_single(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip,
++ GFP_ATOMIC);
++}
++
++/* Check the status of the bitmap
++ * -1 == all bits cleared
++ * 1 == all bits set
++ * 0 == anything else
++ */
++static inline int
++bitmap_status(struct ip_set_iptreemap_d *dtree)
++{
++ unsigned char first = dtree->bitmap[0];
++ int a;
++
++ for (a = 1; a < 32; a++)
++ if (dtree->bitmap[a] != first)
++ return 0;
++
++ return (first == 0 ? -1 : (first == 255 ? 1 : 0));
++}
++
++static void
++gc(unsigned long addr)
++{
++ struct ip_set *set = (struct ip_set *) addr;
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c;
++ int i, j, k;
++
++ write_lock_bh(&set->lock);
++
++ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
++ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
++ if (!test_and_clear_bit(b, (void *) btree->dirty))
++ continue;
++ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
++ switch (bitmap_status(dtree)) {
++ case -1:
++ kmem_cache_free(cachep_d, dtree);
++ ctree->tree[c] = NULL;
++ k--;
++ break;
++ case 1:
++ kmem_cache_free(cachep_d, dtree);
++ ctree->tree[c] = fullbitmap_d;
++ k++;
++ break;
++ }
++ } LOOP_WALK_END();
++ } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
++ } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
++
++ write_unlock_bh(&set->lock);
++
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static inline void
++init_gc_timer(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ init_timer(&map->gc);
++ map->gc.data = (unsigned long) set;
++ map->gc.function = gc;
++ map->gc.expires = jiffies + map->gc_interval * HZ;
++ add_timer(&map->gc);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
++ struct ip_set_iptreemap *map;
++
++ if (size != sizeof(struct ip_set_req_iptreemap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
++ return -EINVAL;
++ }
++
++ map = kzalloc(sizeof(*map), GFP_KERNEL);
++ if (!map)
++ return -ENOMEM;
++
++ map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
++ set->data = map;
++
++ init_gc_timer(set);
++
++ return 0;
++}
++
++static inline void __flush(struct ip_set_iptreemap *map)
++{
++ struct ip_set_iptreemap_b *btree;
++ unsigned int a;
++
++ LOOP_WALK_BEGIN(map, a, btree);
++ if (btree != fullbitmap_b)
++ free_b(btree);
++ LOOP_WALK_END();
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ while (!del_timer(&map->gc))
++ msleep(IPTREEMAP_DESTROY_SLEEP);
++
++ __flush(map);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++
++ while (!del_timer(&map->gc))
++ msleep(IPTREEMAP_DESTROY_SLEEP);
++
++ __flush(map);
++
++ memset(map, 0, sizeof(*map));
++
++ init_gc_timer(set);
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
++
++ header->gc_interval = map->gc_interval;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d, inrange = 0, count = 0;
++
++ LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
++ LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
++ LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
++ for (d = 0; d < 256; d++) {
++ if (test_bit(d, (void *) dtree->bitmap)) {
++ inrange = 1;
++ } else if (inrange) {
++ count++;
++ inrange = 0;
++ }
++ }
++ } LOOP_WALK_END_COUNT();
++ } LOOP_WALK_END_COUNT();
++ } LOOP_WALK_END_COUNT();
++
++ if (inrange)
++ count++;
++
++ return (count * sizeof(struct ip_set_req_iptreemap));
++}
++
++static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
++{
++ struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
++
++ entry->start = start;
++ entry->end = end;
++
++ return sizeof(*entry);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
++ struct ip_set_iptreemap_b *btree;
++ struct ip_set_iptreemap_c *ctree;
++ struct ip_set_iptreemap_d *dtree;
++ unsigned int a, b, c, d, inrange = 0;
++ size_t offset = 0;
++ ip_set_ip_t start = 0, end = 0, ip;
++
++ LOOP_WALK_BEGIN(map, a, btree) {
++ LOOP_WALK_BEGIN(btree, b, ctree) {
++ LOOP_WALK_BEGIN(ctree, c, dtree) {
++ for (d = 0; d < 256; d++) {
++ if (test_bit(d, (void *) dtree->bitmap)) {
++ ip = ((a << 24) | (b << 16) | (c << 8) | d);
++ if (!inrange) {
++ inrange = 1;
++ start = ip;
++ } else if (end < ip - 1) {
++ offset += add_member(data, offset, start, end);
++ start = ip;
++ }
++ end = ip;
++ } else if (inrange) {
++ offset += add_member(data, offset, start, end);
++ inrange = 0;
++ }
++ }
++ } LOOP_WALK_END();
++ } LOOP_WALK_END();
++ } LOOP_WALK_END();
++
++ if (inrange)
++ add_member(data, offset, start, end);
++}
++
++static struct ip_set_type ip_set_iptreemap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = create,
++ .destroy = destroy,
++ .flush = flush,
++ .reqsize = sizeof(struct ip_set_req_iptreemap),
++ .addip = addip,
++ .addip_kernel = addip_kernel,
++ .delip = delip,
++ .delip_kernel = delip_kernel,
++ .testip = testip,
++ .testip_kernel = testip_kernel,
++ .header_size = sizeof(struct ip_set_req_iptreemap_create),
++ .list_header = list_header,
++ .list_members_size = list_members_size,
++ .list_members = list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
++MODULE_DESCRIPTION("iptreemap type of IP sets");
++
++static int __init ip_set_iptreemap_init(void)
++{
++ int ret = -ENOMEM;
++ int a;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
++ 0, 0, NULL);
++#else
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_b) {
++ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
++ goto out;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c),
++ 0, 0, NULL);
++#else
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ sizeof(struct ip_set_iptreemap_c),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_c) {
++ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
++ goto outb;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d),
++ 0, 0, NULL);
++#else
++ cachep_d = kmem_cache_create("ip_set_iptreemap_d",
++ sizeof(struct ip_set_iptreemap_d),
++ 0, 0, NULL, NULL);
++#endif
++ if (!cachep_d) {
++ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
++ goto outc;
++ }
++
++ fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
++ if (!fullbitmap_d)
++ goto outd;
++
++ fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
++ if (!fullbitmap_c)
++ goto outbitmapd;
++
++ fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
++ if (!fullbitmap_b)
++ goto outbitmapc;
++
++ ret = ip_set_register_set_type(&ip_set_iptreemap);
++ if (0 > ret)
++ goto outbitmapb;
++
++ /* Now init our global bitmaps */
++ memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
++
++ for (a = 0; a < 256; a++)
++ fullbitmap_c->tree[a] = fullbitmap_d;
++
++ for (a = 0; a < 256; a++)
++ fullbitmap_b->tree[a] = fullbitmap_c;
++ memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
++
++ return 0;
++
++outbitmapb:
++ kmem_cache_free(cachep_b, fullbitmap_b);
++outbitmapc:
++ kmem_cache_free(cachep_c, fullbitmap_c);
++outbitmapd:
++ kmem_cache_free(cachep_d, fullbitmap_d);
++outd:
++ kmem_cache_destroy(cachep_d);
++outc:
++ kmem_cache_destroy(cachep_c);
++outb:
++ kmem_cache_destroy(cachep_b);
++out:
++
++ return ret;
++}
++
++static void __exit ip_set_iptreemap_fini(void)
++{
++ ip_set_unregister_set_type(&ip_set_iptreemap);
++ kmem_cache_free(cachep_d, fullbitmap_d);
++ kmem_cache_free(cachep_c, fullbitmap_c);
++ kmem_cache_free(cachep_b, fullbitmap_b);
++ kmem_cache_destroy(cachep_d);
++ kmem_cache_destroy(cachep_c);
++ kmem_cache_destroy(cachep_b);
++}
++
++module_init(ip_set_iptreemap_init);
++module_exit(ip_set_iptreemap_fini);
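As the header comment of ip_set_iptreemap.c notes, the first three octets of an address walk the tree and the last octet picks a bit in the 256-bit leaf bitmap. A short illustrative sketch of that decomposition in plain userspace C (the variable names and output below are assumptions for illustration, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 192.168.1.42 in host byte order, matching how hash_ip is treated */
        uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8) | 42u;

        unsigned char a = (ip >> 24) & 0xff;    /* index into the b-level tree   */
        unsigned char b = (ip >> 16) & 0xff;    /* index into the c-level tree   */
        unsigned char c = (ip >> 8) & 0xff;     /* index into the d-level bitmap */
        unsigned char d = ip & 0xff;            /* bit number inside the bitmap  */

        /* the leaf bitmap holds 256 bits, so d lands in byte d/8, bit d%8 */
        printf("a=%u b=%u c=%u d=%u -> bitmap byte %u, bit %u\n",
               a, b, c, d, d / 8, d % 8);
        return 0;
}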
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_macipmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,375 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing an IP set type: the macipmap type */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/if_ether.h>
++#include <linux/vmalloc.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_macipmap.h>
++
++static int
++testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
++ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->ip < map->first_ip || req->ip > map->last_ip)
++ return -ERANGE;
++
++ *hash_ip = req->ip;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ if (test_bit(IPSET_MACIP_ISSET,
++ (void *) &table[req->ip - map->first_ip].flags)) {
++ return (memcmp(req->ethernet,
++ &table[req->ip - map->first_ip].ethernet,
++ ETH_ALEN) == 0);
++ } else {
++ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++ }
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++ ip_set_ip_t ip;
++
++ ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return 0;
++
++ *hash_ip = ip;
++ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ if (test_bit(IPSET_MACIP_ISSET,
++ (void *) &table[ip - map->first_ip].flags)) {
++ /* Is mac pointer valid?
++ * If so, compare... */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ return (skb_mac_header(skb) >= skb->head
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
++#else
++ return (skb->mac.raw >= skb->head
++ && (skb->mac.raw + ETH_HLEN) <= skb->data
++#endif
++ && (memcmp(eth_hdr(skb)->h_source,
++ &table[ip - map->first_ip].ethernet,
++ ETH_ALEN) == 0));
++ } else {
++ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
++ }
++}
++
++/* returns 0 on success */
++static inline int
++__addip(struct ip_set *set,
++ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (test_and_set_bit(IPSET_MACIP_ISSET,
++ (void *) &table[ip - map->first_ip].flags))
++ return -EEXIST;
++
++ *hash_ip = ip;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
++ return 0;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_macipmap *req =
++ (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++ return __addip(set, req->ip, req->ethernet, hash_ip);
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t ip;
++
++ ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (!(skb_mac_header(skb) >= skb->head
++ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
++#else
++ if (!(skb->mac.raw >= skb->head
++ && (skb->mac.raw + ETH_HLEN) <= skb->data))
++#endif
++ return -EINVAL;
++
++ return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
++}
++
++static inline int
++__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_macip *table =
++ (struct ip_set_macip *) map->members;
++
++ if (ip < map->first_ip || ip > map->last_ip)
++ return -ERANGE;
++ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
++ (void *)&table[ip - map->first_ip].flags))
++ return -EEXIST;
++
++ *hash_ip = ip;
++ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_macipmap *req =
++ (struct ip_set_req_macipmap *) data;
++
++ if (size != sizeof(struct ip_set_req_macipmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap),
++ size);
++ return -EINVAL;
++ }
++ return __delip(set, req->ip, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __delip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
++{
++ return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_macipmap_create *req =
++ (struct ip_set_req_macipmap_create *) data;
++ struct ip_set_macipmap *map;
++
++ if (size != sizeof(struct ip_set_req_macipmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_macipmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u.%u.%u.%u to %u.%u.%u.%u",
++ HIPQUAD(req->from), HIPQUAD(req->to));
++
++ if (req->from > req->to) {
++ DP("bad ip range");
++ return -ENOEXEC;
++ }
++
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big (max %d addresses)",
++ MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_macipmap));
++ return -ENOMEM;
++ }
++ map->flags = req->flags;
++ map->first_ip = req->from;
++ map->last_ip = req->to;
++ newbytes = members_size(map->first_ip, map->last_ip);
++ map->members = ip_set_malloc(newbytes);
++ DP("members: %u %p", newbytes, map->members);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ memset(map->members, 0, members_size(map->first_ip, map->last_ip));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++ struct ip_set_req_macipmap_create *header =
++ (struct ip_set_req_macipmap_create *) data;
++
++ DP("list_header %x %x %u", map->first_ip, map->last_ip,
++ map->flags);
++
++ header->from = map->first_ip;
++ header->to = map->last_ip;
++ header->flags = map->flags;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ DP("%u", members_size(map->first_ip, map->last_ip));
++ return members_size(map->first_ip, map->last_ip);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_macipmap *map =
++ (struct ip_set_macipmap *) set->data;
++
++ int bytes = members_size(map->first_ip, map->last_ip);
++
++ DP("members: %u %p", bytes, map->members);
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_macipmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_macipmap),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_macipmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("macipmap type of IP sets");
++
++static int __init ip_set_macipmap_init(void)
++{
++ init_max_malloc_size();
++ return ip_set_register_set_type(&ip_set_macipmap);
++}
++
++static void __exit ip_set_macipmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_macipmap);
++}
++
++module_init(ip_set_macipmap_init);
++module_exit(ip_set_macipmap_fini);
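The macipmap hunk above keeps one fixed-size slot per address between first_ip and last_ip, which is why members_size() reduces to (to - from + 1) * sizeof(struct ip_set_macip). A rough sketch of that sizing and indexing, using a simplified stand-in for the real structure defined in ip_set_macipmap.h:

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for struct ip_set_macip */
struct macip {
        unsigned short flags;           /* the IPSET_MACIP_ISSET bit lives here */
        unsigned char ethernet[6];      /* MAC address bound to this IP         */
};

int main(void)
{
        uint32_t first_ip = 10u << 24;                  /* 10.0.0.0   */
        uint32_t last_ip  = (10u << 24) | 255u;         /* 10.0.0.255 */
        uint32_t ip       = (10u << 24) | 7u;           /* 10.0.0.7   */
        size_t bytes = (size_t)(last_ip - first_ip + 1) * sizeof(struct macip);

        printf("table size: %zu bytes, slot for 10.0.0.7: %u\n",
               bytes, (unsigned int)(ip - first_ip));
        return 0;
}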
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_nethash.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,497 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing a cidr nethash set */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/jhash.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/vmalloc.h>
++#include <linux/random.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_malloc.h>
++#include <linux/netfilter_ipv4/ip_set_nethash.h>
++
++static int limit = MAX_RANGE;
++
++static inline __u32
++jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
++{
++ return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
++}
++
++static inline __u32
++hash_id_cidr(struct ip_set_nethash *map,
++ ip_set_ip_t ip,
++ unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ __u32 id;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ *hash_ip = pack(ip, cidr);
++
++ for (i = 0; i < map->probes; i++) {
++ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
++ DP("hash key: %u", id);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ if (*elem == *hash_ip)
++ return id;
++ }
++ return UINT_MAX;
++}
++
++static inline __u32
++hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ __u32 id = UINT_MAX;
++ int i;
++
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
++ if (id != UINT_MAX)
++ break;
++ }
++ return id;
++}
++
++static inline int
++__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
++}
++
++static inline int
++__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
++{
++ return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
++}
++
++static int
++testip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
++ : __testip_cidr(set, req->ip, req->cidr, hash_ip));
++}
++
++static int
++testip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ return __testip(set,
++ ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr),
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr),
++#endif
++ hash_ip);
++}
++
++static inline int
++__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
++{
++ __u32 probe;
++ u_int16_t i;
++ ip_set_ip_t *elem;
++
++ for (i = 0; i < map->probes; i++) {
++ probe = jhash_ip(map, i, ip) % map->hashsize;
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
++ if (*elem == ip)
++ return -EEXIST;
++ if (!*elem) {
++ *elem = ip;
++ map->elements++;
++ return 0;
++ }
++ }
++ /* Trigger rehashing */
++ return -EAGAIN;
++}
++
++static inline int
++__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ if (!ip || map->elements >= limit)
++ return -ERANGE;
++
++ *hash_ip = pack(ip, cidr);
++ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
++
++ return __addip_base(map, *hash_ip);
++}
++
++static void
++update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
++{
++ unsigned char next;
++ int i;
++
++ for (i = 0; i < 30 && map->cidr[i]; i++) {
++ if (map->cidr[i] == cidr) {
++ return;
++ } else if (map->cidr[i] < cidr) {
++ next = map->cidr[i];
++ map->cidr[i] = cidr;
++ cidr = next;
++ }
++ }
++ if (i < 30)
++ map->cidr[i] = cidr;
++}
++
++static int
++addip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++ int ret;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ ret = __addip((struct ip_set_nethash *) set->data,
++ req->ip, req->cidr, hash_ip);
++
++ if (ret == 0)
++ update_cidr_sizes((struct ip_set_nethash *) set->data,
++ req->cidr);
++
++ return ret;
++}
++
++static int
++addip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ int ret = -ERANGE;
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (map->cidr[0])
++ ret = __addip(map, ip, map->cidr[0], hash_ip);
++
++ return ret;
++}
++
++static int retry(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ ip_set_ip_t *elem;
++ void *members;
++ u_int32_t i, hashsize = map->hashsize;
++ int res;
++ struct ip_set_nethash *tmp;
++
++ if (map->resize == 0)
++ return -ERANGE;
++
++ again:
++ res = 0;
++
++ /* Calculate new parameters */
++ hashsize += (hashsize * map->resize)/100;
++ if (hashsize == map->hashsize)
++ hashsize++;
++
++ ip_set_printk("rehashing of set %s triggered: "
++ "hashsize grows from %u to %u",
++ set->name, map->hashsize, hashsize);
++
++ tmp = kmalloc(sizeof(struct ip_set_nethash)
++ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
++ if (!tmp) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_nethash)
++ + map->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
++ if (!tmp->members) {
++ DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
++ kfree(tmp);
++ return -ENOMEM;
++ }
++ tmp->hashsize = hashsize;
++ tmp->elements = 0;
++ tmp->probes = map->probes;
++ tmp->resize = map->resize;
++ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
++ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
++
++ write_lock_bh(&set->lock);
++ map = (struct ip_set_nethash *) set->data; /* Play safe */
++ for (i = 0; i < map->hashsize && res == 0; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ if (*elem)
++ res = __addip_base(tmp, *elem);
++ }
++ if (res) {
++ /* Failure, try again */
++ write_unlock_bh(&set->lock);
++ harray_free(tmp->members);
++ kfree(tmp);
++ goto again;
++ }
++
++ /* Success at resizing! */
++ members = map->members;
++
++ map->hashsize = tmp->hashsize;
++ map->members = tmp->members;
++ write_unlock_bh(&set->lock);
++
++ harray_free(members);
++ kfree(tmp);
++
++ return 0;
++}
++
++static inline int
++__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
++ ip_set_ip_t *hash_ip)
++{
++ ip_set_ip_t id, *elem;
++
++ if (!ip)
++ return -ERANGE;
++
++ id = hash_id_cidr(map, ip, cidr, hash_ip);
++ if (id == UINT_MAX)
++ return -EEXIST;
++
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
++ *elem = 0;
++ map->elements--;
++ return 0;
++}
++
++static int
++delip(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_ip)
++{
++ struct ip_set_req_nethash *req =
++ (struct ip_set_req_nethash *) data;
++
++ if (size != sizeof(struct ip_set_req_nethash)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash),
++ size);
++ return -EINVAL;
++ }
++ /* TODO: no garbage collection in map->cidr */
++ return __delip((struct ip_set_nethash *) set->data,
++ req->ip, req->cidr, hash_ip);
++}
++
++static int
++delip_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_ip,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ int ret = -ERANGE;
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ ? ip_hdr(skb)->saddr
++ : ip_hdr(skb)->daddr);
++#else
++ ? skb->nh.iph->saddr
++ : skb->nh.iph->daddr);
++#endif
++
++ if (map->cidr[0])
++ ret = __delip(map, ip, map->cidr[0], hash_ip);
++
++ return ret;
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ struct ip_set_req_nethash_create *req =
++ (struct ip_set_req_nethash_create *) data;
++ struct ip_set_nethash *map;
++ uint16_t i;
++
++ if (size != sizeof(struct ip_set_req_nethash_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_nethash_create),
++ size);
++ return -EINVAL;
++ }
++
++ if (req->hashsize < 1) {
++ ip_set_printk("hashsize too small");
++ return -ENOEXEC;
++ }
++ if (req->probes < 1) {
++ ip_set_printk("probes too small");
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_nethash)
++ + req->probes * sizeof(uint32_t), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_nethash)
++ + req->probes * sizeof(uint32_t));
++ return -ENOMEM;
++ }
++ for (i = 0; i < req->probes; i++)
++ get_random_bytes(((uint32_t *) map->initval)+i, 4);
++ map->elements = 0;
++ map->hashsize = req->hashsize;
++ map->probes = req->probes;
++ map->resize = req->resize;
++ memset(map->cidr, 0, 30 * sizeof(unsigned char));
++ map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
++ kfree(map);
++ return -ENOMEM;
++ }
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ harray_free(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
++ memset(map->cidr, 0, 30 * sizeof(unsigned char));
++ map->elements = 0;
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ struct ip_set_req_nethash_create *header =
++ (struct ip_set_req_nethash_create *) data;
++
++ header->hashsize = map->hashsize;
++ header->probes = map->probes;
++ header->resize = map->resize;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++
++ return (map->hashsize * sizeof(ip_set_ip_t));
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
++ ip_set_ip_t i, *elem;
++
++ for (i = 0; i < map->hashsize; i++) {
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ ((ip_set_ip_t *)data)[i] = *elem;
++ }
++}
++
++static struct ip_set_type ip_set_nethash = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_nethash),
++ .addip = &addip,
++ .addip_kernel = &addip_kernel,
++ .retry = &retry,
++ .delip = &delip,
++ .delip_kernel = &delip_kernel,
++ .testip = &testip,
++ .testip_kernel = &testip_kernel,
++ .header_size = sizeof(struct ip_set_req_nethash_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("nethash type of IP sets");
++module_param(limit, int, 0600);
++MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
++
++static int __init ip_set_nethash_init(void)
++{
++ return ip_set_register_set_type(&ip_set_nethash);
++}
++
++static void __exit ip_set_nethash_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_nethash);
++}
++
++module_init(ip_set_nethash_init);
++module_exit(ip_set_nethash_fini);
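The nethash type above stores each network masked to its prefix length and, on lookup, hash_id() walks the recorded prefix lengths from most to least specific (update_cidr_sizes() keeps cidr[] ordered that way). A hedged userspace sketch of that probe order, with a simplified pack() standing in for the kernel helper and the jhash step omitted:

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for pack(): mask the address down to its prefix */
static uint32_t pack(uint32_t ip, unsigned char cidr)
{
        return cidr ? (ip & (0xffffffffu << (32 - cidr))) : 0;
}

int main(void)
{
        /* prefix lengths present in the set, most specific first */
        unsigned char cidr[] = { 28, 24, 16 };
        uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8) | 200u;
        unsigned int i;

        for (i = 0; i < sizeof(cidr) / sizeof(cidr[0]); i++)
                printf("probe /%u -> candidate key %08lx\n",
                       (unsigned int) cidr[i], (unsigned long) pack(ip, cidr[i]));
        return 0;
}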
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ip_set_portmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,346 @@
++/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module implementing a port set type as a bitmap */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/errno.h>
++#include <asm/uaccess.h>
++#include <asm/bitops.h>
++#include <linux/spinlock.h>
++
++#include <net/ip.h>
++
++#include <linux/netfilter_ipv4/ip_set_portmap.h>
++
++/* We must handle non-linear skbs */
++static inline ip_set_ip_t
++get_port(const struct sk_buff *skb, u_int32_t flags)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ struct iphdr *iph = ip_hdr(skb);
++#else
++ struct iphdr *iph = skb->nh.iph;
++#endif
++ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
++ switch (iph->protocol) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ /* See comments at tcp_match in ip_tables.c */
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ tcph.source : tcph.dest);
++ }
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (offset)
++ return INVALID_PORT;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
++ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
++#else
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
++#endif
++ /* No choice either */
++ return INVALID_PORT;
++
++ return ntohs(flags & IPSET_SRC ?
++ udph.source : udph.dest);
++ }
++ default:
++ return INVALID_PORT;
++ }
++}
++
++static inline int
++__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++
++ *hash_port = port;
++ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
++ return !!test_bit(port - map->first_port, map->members);
++}
++
++static int
++testport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __testport(set, req->port, hash_port);
++}
++
++static int
++testport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ int res;
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
++ if (port == INVALID_PORT)
++ return 0;
++
++ res = __testport(set, port, hash_port);
++
++ return (res < 0 ? 0 : res);
++}
++
++static inline int
++__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++ if (test_and_set_bit(port - map->first_port, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
++}
++
++static int
++addport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __addport(set, req->port, hash_port);
++}
++
++static int
++addport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __addport(set, port, hash_port);
++}
++
++static inline int
++__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ if (port < map->first_port || port > map->last_port)
++ return -ERANGE;
++ if (!test_and_clear_bit(port - map->first_port, map->members))
++ return -EEXIST;
++
++ *hash_port = port;
++ DP("port %u", port);
++ return 0;
++}
++
++static int
++delport(struct ip_set *set, const void *data, size_t size,
++ ip_set_ip_t *hash_port)
++{
++ struct ip_set_req_portmap *req =
++ (struct ip_set_req_portmap *) data;
++
++ if (size != sizeof(struct ip_set_req_portmap)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap),
++ size);
++ return -EINVAL;
++ }
++ return __delport(set, req->port, hash_port);
++}
++
++static int
++delport_kernel(struct ip_set *set,
++ const struct sk_buff *skb,
++ ip_set_ip_t *hash_port,
++ const u_int32_t *flags,
++ unsigned char index)
++{
++ ip_set_ip_t port = get_port(skb, flags[index]);
++
++ if (port == INVALID_PORT)
++ return -EINVAL;
++
++ return __delport(set, port, hash_port);
++}
++
++static int create(struct ip_set *set, const void *data, size_t size)
++{
++ int newbytes;
++ struct ip_set_req_portmap_create *req =
++ (struct ip_set_req_portmap_create *) data;
++ struct ip_set_portmap *map;
++
++ if (size != sizeof(struct ip_set_req_portmap_create)) {
++ ip_set_printk("data length wrong (want %zu, have %zu)",
++ sizeof(struct ip_set_req_portmap_create),
++ size);
++ return -EINVAL;
++ }
++
++ DP("from %u to %u", req->from, req->to);
++
++ if (req->from > req->to) {
++ DP("bad port range");
++ return -ENOEXEC;
++ }
++
++ if (req->to - req->from > MAX_RANGE) {
++ ip_set_printk("range too big (max %d ports)",
++ MAX_RANGE+1);
++ return -ENOEXEC;
++ }
++
++ map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
++ if (!map) {
++ DP("out of memory for %d bytes",
++ sizeof(struct ip_set_portmap));
++ return -ENOMEM;
++ }
++ map->first_port = req->from;
++ map->last_port = req->to;
++ newbytes = bitmap_bytes(req->from, req->to);
++ map->members = kmalloc(newbytes, GFP_KERNEL);
++ if (!map->members) {
++ DP("out of memory for %d bytes", newbytes);
++ kfree(map);
++ return -ENOMEM;
++ }
++ memset(map->members, 0, newbytes);
++
++ set->data = map;
++ return 0;
++}
++
++static void destroy(struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ kfree(map->members);
++ kfree(map);
++
++ set->data = NULL;
++}
++
++static void flush(struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
++}
++
++static void list_header(const struct ip_set *set, void *data)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ struct ip_set_req_portmap_create *header =
++ (struct ip_set_req_portmap_create *) data;
++
++ DP("list_header %u %u", map->first_port, map->last_port);
++
++ header->from = map->first_port;
++ header->to = map->last_port;
++}
++
++static int list_members_size(const struct ip_set *set)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++
++ return bitmap_bytes(map->first_port, map->last_port);
++}
++
++static void list_members(const struct ip_set *set, void *data)
++{
++ struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
++ int bytes = bitmap_bytes(map->first_port, map->last_port);
++
++ memcpy(data, map->members, bytes);
++}
++
++static struct ip_set_type ip_set_portmap = {
++ .typename = SETTYPE_NAME,
++ .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
++ .protocol_version = IP_SET_PROTOCOL_VERSION,
++ .create = &create,
++ .destroy = &destroy,
++ .flush = &flush,
++ .reqsize = sizeof(struct ip_set_req_portmap),
++ .addip = &addport,
++ .addip_kernel = &addport_kernel,
++ .delip = &delport,
++ .delip_kernel = &delport_kernel,
++ .testip = &testport,
++ .testip_kernel = &testport_kernel,
++ .header_size = sizeof(struct ip_set_req_portmap_create),
++ .list_header = &list_header,
++ .list_members_size = &list_members_size,
++ .list_members = &list_members,
++ .me = THIS_MODULE,
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("portmap type of IP sets");
++
++static int __init ip_set_portmap_init(void)
++{
++ return ip_set_register_set_type(&ip_set_portmap);
++}
++
++static void __exit ip_set_portmap_fini(void)
++{
++ /* FIXME: possible race with ip_set_create() */
++ ip_set_unregister_set_type(&ip_set_portmap);
++}
++
++module_init(ip_set_portmap_init);
++module_exit(ip_set_portmap_fini);
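
The portmap type is simpler: a bitmap over a port range fixed at create time (first_port..last_port, at most MAX_RANGE+1 ports), so membership operations reduce to a single test_bit()/set_bit(). Assuming the same ipset 2.x syntax (the --from/--to option names are taken from the contemporary ipset(8) and should be verified there), typical use looks roughly like:

	# bitmap covering ports 1-1024
	ipset -N lowports portmap --from 1 --to 1024
	ipset -A lowports 22
	ipset -T lowports 22
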
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ipt_set.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ipt_set.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ipt_set.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ipt_set.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,160 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Kernel module to match an IP set. */
++
++#include <linux/module.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/version.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ip_set.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static inline int
++match_set(const struct ipt_set_info *info,
++ const struct sk_buff *skb,
++ int inv)
++{
++ if (ip_set_testip_kernel(info->index, skb, info->flags))
++ inv = !inv;
++ return inv;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++ const void *matchinfo,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ int offset, unsigned int protoff, bool *hotdrop)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ int offset, unsigned int protoff, int *hotdrop)
++#else
++ int offset, int *hotdrop)
++#endif
++{
++ const struct ipt_set_info_match *info = matchinfo;
++
++ return match_set(&info->match_set,
++ skb,
++ info->match_set.flags[0] & IPSET_MATCH_INV);
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ const void *inf,
++#else
++ const struct ipt_ip *ip,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++ void *matchinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ unsigned int matchsize,
++#endif
++ unsigned int hook_mask)
++{
++ struct ipt_set_info_match *info =
++ (struct ipt_set_info_match *) matchinfo;
++ ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++ ip_set_printk("invalid matchsize %d", matchsize);
++ return 0;
++ }
++#endif
++
++ index = ip_set_get_byindex(info->match_set.index);
++
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("Cannot find set indentified by id %u to match",
++ info->match_set.index);
++ return 0; /* error */
++ }
++ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++ ip_set_printk("That's nasty!");
++ return 0; /* error */
++ }
++
++ return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_match *match,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ void *matchinfo, unsigned int matchsize)
++#else
++ void *matchinfo)
++#endif
++{
++ struct ipt_set_info_match *info = matchinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
++ ip_set_printk("invalid matchsize %d", matchsize);
++ return;
++ }
++#endif
++ ip_set_put(info->match_set.index);
++}
++
++static struct ipt_match set_match = {
++ .name = "set",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ .family = AF_INET,
++#endif
++ .match = &match,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ .matchsize = sizeof(struct ipt_set_info_match),
++#endif
++ .checkentry = &checkentry,
++ .destroy = &destroy,
++ .me = THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set match module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_match xt_register_match
++#define ipt_unregister_match xt_unregister_match
++#endif
++
++static int __init ipt_ipset_init(void)
++{
++ return ipt_register_match(&set_match);
++}
++
++static void __exit ipt_ipset_fini(void)
++{
++ ipt_unregister_match(&set_match);
++}
++
++module_init(ipt_ipset_init);
++module_exit(ipt_ipset_fini);
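
The set match just defined resolves the set by index in checkentry(), takes a reference that destroy() releases via ip_set_put(), and consults flags[0] for source/destination selection plus an optional IPSET_MATCH_INV inversion bit. From iptables this surfaces roughly as below, assuming the old --set option spelling that shipped alongside ipset 2.x (later iptables releases renamed it --match-set); set and chain names are placeholders:

	# drop packets whose source address is in the 'blacklist' set
	iptables -A INPUT -m set --set blacklist src -j DROP
	# match on the destination address instead
	iptables -A FORWARD -m set --set webservers dst -j ACCEPT
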
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/ipt_SET.c linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ipt_SET.c
+--- ./linux-2.6.21.5/net/ipv4/netfilter/ipt_SET.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/ipt_SET.c 2007-10-12 14:28:29.000000000 +0200
+@@ -0,0 +1,172 @@
++/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
++ * Patrick Schaaf <bof@bof.de>
++ * Martin Josefsson <gandalf@wlug.westbo.se>
++ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* ipt_SET.c - netfilter target to manipulate IP sets */
++
++#include <linux/types.h>
++#include <linux/ip.h>
++#include <linux/timer.h>
++#include <linux/module.h>
++#include <linux/netfilter.h>
++#include <linux/netdevice.h>
++#include <linux/if.h>
++#include <linux/inetdevice.h>
++#include <linux/version.h>
++#include <net/protocol.h>
++#include <net/checksum.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv4/ip_tables.h>
++#include <linux/netfilter_ipv4/ipt_set.h>
++
++static unsigned int
++target(struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ unsigned int hooknum,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ const void *targinfo,
++ void *userinfo)
++#else
++ const void *targinfo)
++#endif
++{
++ const struct ipt_set_info_target *info = targinfo;
++
++ if (info->add_set.index != IP_SET_INVALID_ID)
++ ip_set_addip_kernel(info->add_set.index,
++ *pskb,
++ info->add_set.flags);
++ if (info->del_set.index != IP_SET_INVALID_ID)
++ ip_set_delip_kernel(info->del_set.index,
++ *pskb,
++ info->del_set.flags);
++
++ return IPT_CONTINUE;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++static bool
++#else
++static int
++#endif
++checkentry(const char *tablename,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ const void *e,
++#else
++ const struct ipt_entry *e,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++ void *targinfo,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ unsigned int targinfosize,
++#endif
++ unsigned int hook_mask)
++{
++ struct ipt_set_info_target *info =
++ (struct ipt_set_info_target *) targinfo;
++ ip_set_id_t index;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
++ DP("bad target info size %u", targinfosize);
++ return 0;
++ }
++#endif
++
++ if (info->add_set.index != IP_SET_INVALID_ID) {
++ index = ip_set_get_byindex(info->add_set.index);
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("cannot find add_set index %u as target",
++ info->add_set.index);
++ return 0; /* error */
++ }
++ }
++
++ if (info->del_set.index != IP_SET_INVALID_ID) {
++ index = ip_set_get_byindex(info->del_set.index);
++ if (index == IP_SET_INVALID_ID) {
++ ip_set_printk("cannot find del_set index %u as target",
++ info->del_set.index);
++ return 0; /* error */
++ }
++ }
++ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
++ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
++ ip_set_printk("That's nasty!");
++ return 0; /* error */
++ }
++
++ return 1;
++}
++
++static void destroy(
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ const struct xt_target *target,
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ void *targetinfo, unsigned int targetsize)
++#else
++ void *targetinfo)
++#endif
++{
++ struct ipt_set_info_target *info = targetinfo;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
++ ip_set_printk("invalid targetsize %d", targetsize);
++ return;
++ }
++#endif
++ if (info->add_set.index != IP_SET_INVALID_ID)
++ ip_set_put(info->add_set.index);
++ if (info->del_set.index != IP_SET_INVALID_ID)
++ ip_set_put(info->del_set.index);
++}
++
++static struct ipt_target SET_target = {
++ .name = "SET",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++ .family = AF_INET,
++#endif
++ .target = target,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
++ .targetsize = sizeof(struct ipt_set_info_target),
++#endif
++ .checkentry = checkentry,
++ .destroy = destroy,
++ .me = THIS_MODULE
++};
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
++MODULE_DESCRIPTION("iptables IP set target module");
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
++#define ipt_register_target xt_register_target
++#define ipt_unregister_target xt_unregister_target
++#endif
++
++static int __init ipt_SET_init(void)
++{
++ return ipt_register_target(&SET_target);
++}
++
++static void __exit ipt_SET_fini(void)
++{
++ ipt_unregister_target(&SET_target);
++}
++
++module_init(ipt_SET_init);
++module_exit(ipt_SET_fini);
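
The SET target mirrors the match: checkentry() pins the referenced add_set/del_set by index, target() then calls ip_set_addip_kernel()/ip_set_delip_kernel() on every packet that reaches the rule, and the verdict is always IPT_CONTINUE. A rough usage sketch, again assuming the --add-set/--del-set option names of the ipset 2.x era and invented set names:

	# remember every client that connects to SSH
	iptables -A INPUT -p tcp --dport 22 -j SET --add-set ssh-clients src
	# and at the same time drop it from a temporary block list
	iptables -A INPUT -p tcp --dport 22 -j SET --del-set quarantined src
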
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/Kconfig linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/Kconfig
+--- ./linux-2.6.21.5/net/ipv4/netfilter/Kconfig 2007-06-11 20:37:06.000000000 +0200
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/Kconfig 2007-10-12 14:28:29.000000000 +0200
+@@ -657,5 +657,122 @@
+ Allows altering the ARP packet payload: source and destination
+ hardware and network addresses.
+
++config IP_NF_SET
++ tristate "IP set support"
++ depends on INET && NETFILTER
++ help
++ This option adds IP set support to the kernel.
++ In order to define and use sets, you need the userspace utility
++ ipset(8).
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_MAX
++ int "Maximum number of IP sets"
++ default 256
++ range 2 65534
++ depends on IP_NF_SET
++ help
++ You can define here the default value of the maximum number
++ of IP sets for the kernel.
++
++ The value can be overridden by the 'max_sets' module
++ parameter of the 'ip_set' module.
++
++config IP_NF_SET_HASHSIZE
++ int "Hash size for bindings of IP sets"
++ default 1024
++ depends on IP_NF_SET
++ help
++ You can define here the default value of the hash size for
++ bindings of IP sets.
++
++ The value can be overridden by the 'hash_size' module
++ parameter of the 'ip_set' module.
++
++config IP_NF_SET_IPMAP
++ tristate "ipmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_MACIPMAP
++ tristate "macipmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the macipmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_PORTMAP
++ tristate "portmap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the portmap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPHASH
++ tristate "iphash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iphash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_NETHASH
++ tristate "nethash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the nethash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPPORTHASH
++ tristate "ipporthash set support"
++ depends on IP_NF_SET
++ help
++ This option adds the ipporthash set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPTREE
++ tristate "iptree set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iptree set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_SET_IPTREEMAP
++ tristate "iptreemap set support"
++ depends on IP_NF_SET
++ help
++ This option adds the iptreemap set type support.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_MATCH_SET
++ tristate "set match support"
++ depends on IP_NF_SET
++ help
++ Set matching matches packets against the given IP sets.
++ You need the ipset utility to create and set up the sets.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++config IP_NF_TARGET_SET
++ tristate "SET target support"
++ depends on IP_NF_SET
++ help
++ The SET target makes it possible to add and delete entries
++ in IP sets.
++ You need the ipset utility to create and set up the sets.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
++
+ endmenu
+
+diff -Nru ./linux-2.6.21.5/net/ipv4/netfilter/Makefile linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/Makefile
+--- ./linux-2.6.21.5/net/ipv4/netfilter/Makefile 2007-06-11 20:37:06.000000000 +0200
++++ linux-2.6.21.5.pom2patch.set/net/ipv4/netfilter/Makefile 2007-10-12 14:28:29.000000000 +0200
+@@ -90,6 +90,7 @@
+ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
+ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+ obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
++obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
+ obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+
+ # targets
+@@ -105,6 +106,18 @@
+ obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
+ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
+ obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
++obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
++
++# sets
++obj-$(CONFIG_IP_NF_SET) += ip_set.o
++obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
++obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
++obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
++obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
++obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
++obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
++obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
++obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
+
+ # generic ARP tables
+ obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o