/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
- struct ipq *next; /* linked list pointers */
+ struct hlist_node list;
struct list_head lru_list; /* lru list member */
u32 user;
u32 saddr;
spinlock_t lock;
atomic_t refcnt;
struct timer_list timer; /* when will this queue expire? */
- struct ipq **pprev;
int iif;
struct timeval stamp;
};
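For reference, the hlist primitives this patch converts to are the standard ones from <linux/list.h>; a sketch of their layout:

	struct hlist_head {
		struct hlist_node *first;
	};

	struct hlist_node {
		struct hlist_node *next;	/* next entry, or NULL at end of chain */
		struct hlist_node **pprev;	/* address of the pointer pointing at us */
	};

A bucket head is a single pointer, so the table stays as compact as the old hand-rolled version, while pprev still allows O(1) unlinking without knowing which bucket a node lives in.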
#define IPQ_HASHSZ 64
/* Per-bucket lock is easy to add now. */
-static struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct hlist_head ipq_hash[IPQ_HASHSZ];
static DEFINE_RWLOCK(ipfrag_lock);
static u32 ipfrag_hash_rnd;
static LIST_HEAD(ipq_lru_list);
static __inline__ void __ipq_unlink(struct ipq *qp)
{
- if(qp->next)
- qp->next->pprev = qp->pprev;
- *qp->pprev = qp->next;
+ hlist_del(&qp->list);
list_del(&qp->lru_list);
ip_frag_nqueues--;
}
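hlist_del() performs exactly the unlink the deleted lines spelled out by hand. A sketch of its core helper (the real hlist_del() additionally poisons the node's pointers so a stale reuse faults loudly instead of corrupting the chain):

	static inline void __hlist_del(struct hlist_node *n)
	{
		struct hlist_node *next = n->next;
		struct hlist_node **pprev = n->pprev;

		*pprev = next;			/* the old "*qp->pprev = qp->next" */
		if (next)
			next->pprev = pprev;	/* the old "qp->next->pprev = qp->pprev" */
	}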
get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
for (i = 0; i < IPQ_HASHSZ; i++) {
struct ipq *q;
+ struct hlist_node *p, *n;
- q = ipq_hash[i];
- while (q) {
- struct ipq *next = q->next;
+ hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
unsigned int hval = ipqhashfn(q->id, q->saddr,
q->daddr, q->protocol);
if (hval != i) {
- /* Unlink. */
- if (q->next)
- q->next->pprev = q->pprev;
- *q->pprev = q->next;
+ hlist_del(&q->list);
/* Relink to new hash chain. */
- if ((q->next = ipq_hash[hval]) != NULL)
- q->next->pprev = &q->next;
- ipq_hash[hval] = q;
- q->pprev = &ipq_hash[hval];
+ hlist_add_head(&q->list, &ipq_hash[hval]);
}
-
- q = next;
}
}
write_unlock(&ipfrag_lock);
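The _safe iterator matters in this rebuild loop: entries may be unlinked and relinked onto other chains mid-walk, and plain hlist_for_each_entry() would chase a next pointer that no longer belongs to the current bucket. A minimal sketch of the pattern, using the same iterator signature as the patch (the entry type, NBUCKETS, buckets[] and rehash() are placeholders):

	struct foo {
		struct hlist_node node;
		/* ... keys consumed by rehash() ... */
	};

	static struct hlist_head buckets[NBUCKETS];

	struct hlist_node *pos, *tmp;
	struct foo *f;
	unsigned int i;

	for (i = 0; i < NBUCKETS; i++) {
		hlist_for_each_entry_safe(f, pos, tmp, &buckets[i], node) {
			unsigned int h = rehash(f);
			if (h != i) {
				hlist_del(&f->node);
				hlist_add_head(&f->node, &buckets[h]);
			}
		}
	}

tmp caches pos->next before the body runs, so moving f cannot derail the traversal; at worst a moved entry is visited again in its new bucket, where rehash() matches the index and it is left alone.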
static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
{
struct ipq *qp;
-
+#ifdef CONFIG_SMP
+ struct hlist_node *n;
+#endif
write_lock(&ipfrag_lock);
#ifdef CONFIG_SMP
/* With SMP race we have to recheck hash table, because
* such entry could be created on other cpu, while we
* promoted read lock to write lock.
*/
- for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+ hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
if(qp->id == qp_in->id &&
qp->saddr == qp_in->saddr &&
qp->daddr == qp_in->daddr &&
atomic_inc(&qp->refcnt);
atomic_inc(&qp->refcnt);
- if((qp->next = ipq_hash[hash]) != NULL)
- qp->next->pprev = &qp->next;
- ipq_hash[hash] = qp;
- qp->pprev = &ipq_hash[hash];
+ hlist_add_head(&qp->list, &ipq_hash[hash]);
INIT_LIST_HEAD(&qp->lru_list);
list_add_tail(&qp->lru_list, &ipq_lru_list);
ip_frag_nqueues++;
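hlist_add_head() is the one-liner that replaces the four-line head insertion deleted above; a sketch of the list.h helper:

	static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
	{
		struct hlist_node *first = h->first;

		n->next = first;		/* old "qp->next = ipq_hash[hash]" */
		if (first)
			first->pprev = &n->next;
		h->first = n;			/* old "ipq_hash[hash] = qp" */
		n->pprev = &h->first;		/* old "qp->pprev = &ipq_hash[hash]" */
	}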
__u8 protocol = iph->protocol;
unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
struct ipq *qp;
+ struct hlist_node *n;
read_lock(&ipfrag_lock);
- for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+ hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
if(qp->id == id &&
qp->saddr == saddr &&
qp->daddr == daddr &&
struct frag_queue
{
- struct frag_queue *next;
+ struct hlist_node list;
struct list_head lru_list; /* lru list member */
__u32 id; /* fragment id */
#define FIRST_IN 2
#define LAST_IN 1
__u16 nhoffset;
- struct frag_queue **pprev;
};
/* Hash table. */
#define IP6Q_HASHSZ 64
-static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ];
+static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
static DEFINE_RWLOCK(ip6_frag_lock);
static u32 ip6_frag_hash_rnd;
static LIST_HEAD(ip6_frag_lru_list);
static __inline__ void __fq_unlink(struct frag_queue *fq)
{
- if(fq->next)
- fq->next->pprev = fq->pprev;
- *fq->pprev = fq->next;
+ hlist_del(&fq->list);
list_del(&fq->lru_list);
ip6_frag_nqueues--;
}
get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
for (i = 0; i < IP6Q_HASHSZ; i++) {
struct frag_queue *q;
+ struct hlist_node *p, *n;
- q = ip6_frag_hash[i];
- while (q) {
- struct frag_queue *next = q->next;
+ hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
unsigned int hval = ip6qhashfn(q->id,
&q->saddr,
&q->daddr);
if (hval != i) {
- /* Unlink. */
- if (q->next)
- q->next->pprev = q->pprev;
- *q->pprev = q->next;
+ hlist_del(&q->list);
/* Relink to new hash chain. */
- if ((q->next = ip6_frag_hash[hval]) != NULL)
- q->next->pprev = &q->next;
- ip6_frag_hash[hval] = q;
- q->pprev = &ip6_frag_hash[hval];
+ hlist_add_head(&q->list,
+ &ip6_frag_hash[hval]);
}
-
- q = next;
}
}
write_unlock(&ip6_frag_lock);
struct frag_queue *fq_in)
{
struct frag_queue *fq;
+#ifdef CONFIG_SMP
+ struct hlist_node *n;
+#endif
write_lock(&ip6_frag_lock);
#ifdef CONFIG_SMP
- for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+ hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
if (fq->id == fq_in->id &&
ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
atomic_inc(&fq->refcnt);
atomic_inc(&fq->refcnt);
- if((fq->next = ip6_frag_hash[hash]) != NULL)
- fq->next->pprev = &fq->next;
- ip6_frag_hash[hash] = fq;
- fq->pprev = &ip6_frag_hash[hash];
+ hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
INIT_LIST_HEAD(&fq->lru_list);
list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
ip6_frag_nqueues++;
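Both intern functions share the same double-check shape: the caller searched under the read lock, found nothing, allocated a queue, and must re-search under the write lock because another CPU may have inserted an equal queue while the lock was dropped. A condensed sketch of the pattern (match(), drop() and the table names are placeholders, not kernel APIs):

	write_lock(&table_lock);
#ifdef CONFIG_SMP
	/* Re-search: another CPU may have won the race. */
	hlist_for_each_entry(q, n, &hash[h], list) {
		if (match(q, q_in)) {
			atomic_inc(&q->refcnt);		/* hand the winner back */
			write_unlock(&table_lock);
			drop(q_in);			/* discard our losing copy */
			return q;
		}
	}
#endif
	hlist_add_head(&q_in->list, &hash[h]);		/* we won: publish it */
	write_unlock(&table_lock);
	return q_in;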
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
struct frag_queue *fq;
+ struct hlist_node *n;
unsigned int hash = ip6qhashfn(id, src, dst);
read_lock(&ip6_frag_lock);
- for(fq = ip6_frag_hash[hash]; fq; fq = fq->next) {
+ hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
if (fq->id == id &&
ipv6_addr_equal(src, &fq->saddr) &&
ipv6_addr_equal(dst, &fq->daddr)) {