*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
+/*
+ * Macro so as not to evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
+do {									\
+	(sk)->sk_lock.owner = NULL;					\
+	init_waitqueue_head(&(sk)->sk_lock.wq);				\
+	spin_lock_init(&(sk)->sk_lock.slock);				\
+	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
+			sizeof((sk)->sk_lock));				\
+	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
+			(skey), (sname));				\
+	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
+} while (0)
+
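As the comment above says, this is a macro rather than an inline function so that the lockdep-only name/key arguments are not evaluated when lockdep is disabled. A minimal, hypothetical caller, mirroring the NFSD/NFS helpers added later in this patch (the MYSVC names are illustrative, not part of the kernel); the lock_class_key objects must have static storage duration because lockdep identifies a lock class by the key object's address:

	static struct lock_class_key my_key;
	static struct lock_class_key my_slock_key;

	/* Hypothetical: move this service's sockets out of the generic
	 * per-address-family class and into their own lockdep class. */
	static void my_reclassify_socket(struct sock *sk)
	{
		sock_lock_init_class_and_name(sk,
				"slock-AF_INET-MYSVC", &my_slock_key,
				"sk_lock-AF_INET-MYSVC", &my_key);
	}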
extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}
	local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
static void print_held_locks_bug(struct task_struct *curr)
{
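The EXPORT_SYMBOL_GPL() above is needed because sock_lock_init_class_and_name() expands to a call of debug_check_no_locks_freed(), and the sunrpc callers added below may be built as modules. Its contract, assuming the signature of this era of kernel/lockdep.c:

	/* Warn if any lock known to lockdep lives inside the memory
	 * range [from, from + len) that is being freed or reused. */
	void debug_check_no_locks_freed(const void *from, unsigned long len);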
*/
static inline void sock_lock_init(struct sock *sk)
{
-	spin_lock_init(&sk->sk_lock.slock);
-	sk->sk_lock.owner = NULL;
-	init_waitqueue_head(&sk->sk_lock.wq);
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-	/*
-	 * Mark both the sk_lock and the sk_lock.slock as a
-	 * per-address-family lock class:
-	 */
-	lockdep_set_class_and_name(&sk->sk_lock.slock,
-			af_family_slock_keys + sk->sk_family,
-			af_family_slock_key_strings[sk->sk_family]);
-	lockdep_init_map(&sk->sk_lock.dep_map,
-			af_family_key_strings[sk->sk_family],
-			af_family_keys + sk->sk_family, 0);
+	sock_lock_init_class_and_name(sk,
+			af_family_slock_key_strings[sk->sk_family],
+			af_family_slock_keys + sk->sk_family,
+			af_family_key_strings[sk->sk_family],
+			af_family_keys + sk->sk_family);
}
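The af_family_* tables consumed above are defined earlier in net/core/sock.c with one slot per address family, which is how every AF_* value gets a distinct class for both sk_lock and sk_lock.slock. A sketch of their shape (the real file also carries the matching name-string tables, with entries such as "sk_lock-AF_INET" and "slock-AF_INET"):

	/* One static key per address family; lockdep distinguishes
	 * classes by the key object's address. */
	static struct lock_class_key af_family_keys[AF_MAX];
	static struct lock_class_key af_family_slock_keys[AF_MAX];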
/**
*/
static int svc_conn_age_period = 6*60;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	BUG_ON(sk->sk_lock.owner != NULL);
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+		break;
+
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+		break;
+
+	default:
+		BUG();
+	}
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
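Reclassifying matters because NFSD's kernel-internal sockets would otherwise share the generic "sk_lock-AF_INET" class with every other socket in the system, so any nesting of two such locks looks like same-class recursion to lockdep. The BUG_ON() above guards against rewriting the class of a lock that is currently held, which would corrupt lockdep state. A simplified, hypothetical locking sequence showing the nesting that the per-service classes make distinguishable:

	/* With the generic class, lockdep sees "sk_lock-AF_INET" taken
	 * twice and reports possible recursive locking; after this
	 * patch the two acquisitions land in different classes. */
	lock_sock(client_sk);		/* sk_lock-AF_INET-NFS  */
	lock_sock(server_sk);		/* sk_lock-AF_INET-NFSD */
	release_sock(server_sk);
	release_sock(client_sk);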
/*
* Queue up an idle server thread. Must have pool->sp_lock held.
* Note: this is really a stack rather than a queue, so that we only
	if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
		return error;

+	svc_reclassify_socket(sock);
+
	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, (struct sockaddr *) sin,
return err;
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	BUG_ON(sk->sk_lock.owner != NULL);
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+		    &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+		break;
+
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+		    &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+		break;
+
+	default:
+		BUG();
+	}
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
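xs_reclassify_socket() is the RPC-client twin of the NFSD helper above; note it uses its own xs_key[]/xs_slock_key[] objects, since reusing svc_key[] would merge the client and server classes again (lockdep keys classes by the static object's address). The call sites below invoke it immediately after socket creation, before the lock can ever be taken, as this condensed sketch of the UDP worker shows (error handling trimmed):

	struct socket *sock;
	int err;

	err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		goto out;
	xs_reclassify_socket(sock);	/* before the first lock_sock() */
	/* xs_bindresvport()/connect and normal use follow */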
/**
* xs_udp_connect_worker - set up a UDP socket
* @work: RPC transport to connect
dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
goto out;
}
+ xs_reclassify_socket(sock);
if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
sock_release(sock);
dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
goto out;
}
+ xs_reclassify_socket(sock);
if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
sock_release(sock);