From 2dc09ea8d91a97dd01c675f2903ce5b4d1fd48d3 Mon Sep 17 00:00:00 2001
From: Liang Zhen
Date: Sun, 27 Mar 2016 20:26:26 -0400
Subject: [PATCH] staging: lustre: libcfs: add lock-class for cfs_percpt_lock

Initialise a lock-class for each sublock of cfs_percpt_lock to eliminate
the false alarm "possible recursive locking detected".

Signed-off-by: Liang Zhen
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6432
Reviewed-on: http://review.whamcloud.com/14368
Reviewed-by: James Simmons
Reviewed-by: Andreas Dilger
Reviewed-by: Oleg Drokin
Signed-off-by: Greg Kroah-Hartman
---
 .../lustre/include/linux/libcfs/libcfs_cpu.h  | 19 +++++++++++++++++--
 .../staging/lustre/lnet/libcfs/libcfs_lock.c  | 13 ++++++++++---
 2 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 71ad93b2c6af..81d8079e3b5e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -256,8 +256,8 @@ struct cfs_percpt_lock {
  * create a cpu-partition lock based on CPU partition table \a cptab,
  * each private lock has extra \a psize bytes padding data
  */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-
+struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+					       struct lock_class_key *keys);
 /* destroy a cpu-partition lock */
 void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
 
@@ -267,6 +267,21 @@ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
 /* unlock private lock \a index of \a pcl */
 void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
 
+#define CFS_PERCPT_LOCK_KEYS	256
+
+/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
+#define cfs_percpt_lock_alloc(cptab)					\
+({									\
+	static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS];	\
+	struct cfs_percpt_lock *___lk;					\
+									\
+	if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS)		\
+		___lk = cfs_percpt_lock_create(cptab, NULL);		\
+	else								\
+		___lk = cfs_percpt_lock_create(cptab, ___keys);		\
+	___lk;								\
+})
+
 /**
  * iterate over all CPU partitions in \a cptab
  */
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index d38954a38f9e..83543f928279 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
  * reason we always allocate cacheline-aligned memory block.
  */
 struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+		       struct lock_class_key *keys)
 {
 	struct cfs_percpt_lock *pcl;
 	spinlock_t *lock;
@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
 		return NULL;
 	}
 
-	cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+	if (!keys)
+		CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
+
+	cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
 		spin_lock_init(lock);
+		if (keys != NULL)
+			lockdep_set_class(lock, &keys[i]);
+	}
 
 	return pcl;
 }
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
 
 /**
  * lock a CPU partition
-- 
2.30.2
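
A minimal usage sketch (illustrative only, not part of the patch above) of how a caller
might use the per-partition lock created by cfs_percpt_lock_alloc() after this change.
It assumes the global cfs_cpt_table pointer and the <linux/libcfs/...> include paths used
elsewhere in libcfs; the demo_* names are hypothetical.

/* Hypothetical caller of the cfs_percpt_lock API shown in the hunks above. */
#include <linux/errno.h>
#include <linux/libcfs/libcfs.h>
#include <linux/libcfs/libcfs_cpu.h>

static struct cfs_percpt_lock *demo_guard;

static int demo_init(void)
{
	/*
	 * One spinlock per CPU partition; with this patch each sublock also
	 * gets its own static lockdep class key, so taking two different
	 * sublocks no longer looks like recursive locking to lockdep.
	 */
	demo_guard = cfs_percpt_lock_alloc(cfs_cpt_table);
	if (!demo_guard)
		return -ENOMEM;
	return 0;
}

static void demo_use(int cpt)
{
	/* serialise against other users of partition @cpt only */
	cfs_percpt_lock(demo_guard, cpt);
	/* ... touch per-partition state here ... */
	cfs_percpt_unlock(demo_guard, cpt);
}

static void demo_fini(void)
{
	cfs_percpt_lock_free(demo_guard);
}

Because lockdep class keys must live in static storage (see the ".data" note in the new
macro), cfs_percpt_lock_alloc() remains a macro wrapping cfs_percpt_lock_create(): the
key array is declared static inside the macro body, one key per possible CPU partition.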