Generalise tcp_bind_hash, tcp_inherit_port and tcp_put_port into inet_bind_hash, inet_inherit_port and inet_put_port. This required moving tcp_bucket_cachep to inet_hashinfo.
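
The conversion for callers is mechanical: inet_bind_hash keeps the old tcp_bind_hash signature, while inet_put_port and inet_inherit_port take the struct inet_hashinfo explicitly. Roughly (illustrative sketch only; the actual conversions are in the diff below):

	/* before */
	tcp_bind_hash(sk, tb, snum);
	tcp_put_port(sk);
	tcp_inherit_port(sk, child);

	/* after: pass the hash table explicitly */
	inet_bind_hash(sk, tb, snum);
	inet_put_port(&tcp_hashinfo, sk);
	inet_inherit_port(&tcp_hashinfo, sk, child);

Both the IPv4 and IPv6 paths pass &tcp_hashinfo, since they share the same bind hash table.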
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H
+#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <net/sock.h>
+
/* This is for all connections with a full identity, no wildcards.
* New scheme, half the table is for TIME_WAIT, the other half is
* for the rest. I'll experiment with dynamic table growth later.
atomic_t lhash_users;
wait_queue_head_t lhash_wait;
spinlock_t portalloc_lock;
+ kmem_cache_t *bind_bucket_cachep;
};
static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
return lport & (bhash_size - 1);
}
+extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ const unsigned short snum);
+
/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
return inet_lhashfn(inet_sk(sk)->num);
}
+/* Caller must disable local BH processing. */
+static inline void __inet_inherit_port(struct inet_hashinfo *table,
+ struct sock *sk, struct sock *child)
+{
+ const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
+ struct inet_bind_hashbucket *head = &table->bhash[bhash];
+ struct inet_bind_bucket *tb;
+
+ spin_lock(&head->lock);
+ tb = inet_sk(sk)->bind_hash;
+ sk_add_bind_node(child, &tb->owners);
+ inet_sk(child)->bind_hash = tb;
+ spin_unlock(&head->lock);
+}
+
+static inline void inet_inherit_port(struct inet_hashinfo *table,
+ struct sock *sk, struct sock *child)
+{
+ local_bh_disable();
+ __inet_inherit_port(table, sk, child);
+ local_bh_enable();
+}
+
+extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);
+
#endif /* _INET_HASHTABLES_H */
#define tcp_lhash_users (tcp_hashinfo.lhash_users)
#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
-
-extern kmem_cache_t *tcp_bucket_cachep;
+#define tcp_bucket_cachep (tcp_hashinfo.bind_bucket_cachep)
extern int tcp_port_rover;
-extern void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- unsigned short snum);
-
#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
#else
#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
-extern void tcp_put_port(struct sock *sk);
-extern void tcp_inherit_port(struct sock *sk, struct sock *child);
-
extern void tcp_v4_err(struct sk_buff *skb, u32);
extern void tcp_shutdown (struct sock *sk, int how);
sk->sk_prot->unhash(sk);
if (inet_sk(sk)->bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
- tcp_put_port(sk);
+ inet_put_port(&tcp_hashinfo, sk);
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
*/
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <net/inet_hashtables.h>
kmem_cache_free(cachep, tb);
}
}
+
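+/* Record that @sk owns local port @snum via bind bucket @tb. */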
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ const unsigned short snum)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ inet->num = snum;
+ sk_add_bind_node(sk, &tb->owners);
+ inet->bind_hash = tb;
+}
+
+EXPORT_SYMBOL(inet_bind_hash);
+
+/*
+ * Get rid of any references to a local port held by the given sock.
+ */
+static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size);
+ struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
+ struct inet_bind_bucket *tb;
+
+ spin_lock(&head->lock);
+ tb = inet->bind_hash;
+ __sk_del_bind_node(sk);
+ inet->bind_hash = NULL;
+ inet->num = 0;
+ inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+ spin_unlock(&head->lock);
+}
+
+void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+{
+ local_bh_disable();
+ __inet_put_port(hashinfo, sk);
+ local_bh_enable();
+}
+
+EXPORT_SYMBOL(inet_put_port);
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
-kmem_cache_t *tcp_bucket_cachep;
-
-EXPORT_SYMBOL_GPL(tcp_bucket_cachep);
-
kmem_cache_t *tcp_timewait_cachep;
atomic_t tcp_orphan_count = ATOMIC_INIT(0);
int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = 1024 - 1;
-/* Caller must disable local BH processing. */
-static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
-{
- struct inet_bind_hashbucket *head =
- &tcp_bhash[inet_bhashfn(inet_sk(child)->num,
- tcp_bhash_size)];
- struct inet_bind_bucket *tb;
-
- spin_lock(&head->lock);
- tb = inet_sk(sk)->bind_hash;
- sk_add_bind_node(child, &tb->owners);
- inet_sk(child)->bind_hash = tb;
- spin_unlock(&head->lock);
-}
-
-inline void tcp_inherit_port(struct sock *sk, struct sock *child)
-{
- local_bh_disable();
- __tcp_inherit_port(sk, child);
- local_bh_enable();
-}
-
-void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- const unsigned short snum)
-{
- struct inet_sock *inet = inet_sk(sk);
- inet->num = snum;
- sk_add_bind_node(sk, &tb->owners);
- inet->bind_hash = tb;
-}
-
static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
{
const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
tb->fastreuse = 0;
success:
if (!inet_sk(sk)->bind_hash)
- tcp_bind_hash(sk, tb, snum);
+ inet_bind_hash(sk, tb, snum);
BUG_TRAP(inet_sk(sk)->bind_hash == tb);
ret = 0;
return ret;
}
-/* Get rid of any references to a local port held by the
- * given sock.
- */
-static void __tcp_put_port(struct sock *sk)
-{
- struct inet_sock *inet = inet_sk(sk);
- struct inet_bind_hashbucket *head = &tcp_bhash[inet_bhashfn(inet->num,
- tcp_bhash_size)];
- struct inet_bind_bucket *tb;
-
- spin_lock(&head->lock);
- tb = inet->bind_hash;
- __sk_del_bind_node(sk);
- inet->bind_hash = NULL;
- inet->num = 0;
- inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
- spin_unlock(&head->lock);
-}
-
-void tcp_put_port(struct sock *sk)
-{
- local_bh_disable();
- __tcp_put_port(sk);
- local_bh_enable();
-}
-
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
* Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
hint += i;
/* Head lock still held and bh's disabled */
- tcp_bind_hash(sk, tb, port);
+ inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->sport = htons(port);
__tcp_v4_hash(sk, 0);
tcp_initialize_rcv_mss(newsk);
__tcp_v4_hash(newsk, 0);
- __tcp_inherit_port(sk, newsk);
+ __inet_inherit_port(&tcp_hashinfo, sk, newsk);
return newsk;
/* Clean up a referenced TCP bind bucket. */
if (inet_sk(sk)->bind_hash)
- tcp_put_port(sk);
+ inet_put_port(&tcp_hashinfo, sk);
/*
* If sendmsg cached page exists, toss it.
}
EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(tcp_bind_hash);
EXPORT_SYMBOL(inet_bind_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
-EXPORT_SYMBOL(tcp_inherit_port);
EXPORT_SYMBOL(tcp_listen_wlock);
EXPORT_SYMBOL(tcp_port_rover);
EXPORT_SYMBOL(tcp_prot);
-EXPORT_SYMBOL(tcp_put_port);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
success:
if (!inet_sk(sk)->bind_hash)
- tcp_bind_hash(sk, tb, snum);
+ inet_bind_hash(sk, tb, snum);
BUG_TRAP(inet_sk(sk)->bind_hash == tb);
ret = 0;
hint += i;
/* Head lock still held and bh's disabled */
- tcp_bind_hash(sk, tb, port);
+ inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->sport = htons(port);
__tcp_v6_hash(sk);
newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
__tcp_v6_hash(newsk);
- tcp_inherit_port(sk, newsk);
+ inet_inherit_port(&tcp_hashinfo, sk, newsk);
return newsk;