From 8c2c2b449aa50463ba4cc1f33cdfc98750ed03ab Mon Sep 17 00:00:00 2001
From: Yuyang Du <duyuyang@gmail.com>
Date: Mon, 6 May 2019 16:19:35 +0800
Subject: [PATCH] locking/lockdep: Refactorize check_noncircular and
 check_redundant

These two functions now handle different check results themselves. A
new check_path function is added to check whether there is a path in
the dependency graph. No functional change.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-20-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/locking/lockdep.c | 118 ++++++++++++++++++++++++---------------
 1 file changed, 74 insertions(+), 44 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8169706df767..30a1c0e32573 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1683,33 +1683,90 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 }
 
 /*
- * Prove that the dependency graph starting at <src> can not
- * lead to <target>. Print an error and return 0 if it does.
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_list *root, struct lock_class *target,
-		  struct lock_list **target_entry)
+check_path(struct lock_class *target, struct lock_list *src_entry,
+	   struct lock_list **target_entry)
 {
-	int result;
+	int ret;
+
+	ret = __bfs_forwards(src_entry, (void *)target, class_equal,
+			     target_entry);
+
+	if (unlikely(ret < 0))
+		print_bfs_bug(ret);
+
+	return ret;
+}
+
+/*
+ * Prove that the dependency graph starting at <src> can not
+ * lead to <target>. If it can, there is a circle when adding
+ * <target> -> <src> dependency.
+ *
+ * Print an error and return 0 if it does.
+ */
+static noinline int
+check_noncircular(struct held_lock *src, struct held_lock *target,
+		  struct lock_trace *trace)
+{
+	int ret;
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list src_entry = {
+		.class = hlock_class(src),
+		.parent = NULL,
+	};
 
 	debug_atomic_inc(nr_cyclic_checks);
 
-	result = __bfs_forwards(root, target, class_equal, target_entry);
+	ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-	return result;
+	if (unlikely(!ret)) {
+		if (!trace->nr_entries) {
+			/*
+			 * If save_trace fails here, the printing might
+			 * trigger a WARN but because of the !nr_entries it
+			 * should not do bad things.
+			 */
+			save_trace(trace);
+		}
+
+		print_circular_bug(&src_entry, target_entry, src, target);
+	}
+
+	return ret;
 }
 
+/*
+ * Check that the dependency graph starting at <src> can lead to
+ * <target> or not. If it can, <src> -> <target> dependency is already
+ * in the graph.
+ *
+ * Print an error and return 2 if it does or 1 if it does not.
+ */
 static noinline int
-check_redundant(struct lock_list *root, struct lock_class *target,
-		struct lock_list **target_entry)
+check_redundant(struct held_lock *src, struct held_lock *target)
 {
-	int result;
+	int ret;
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list src_entry = {
+		.class = hlock_class(src),
+		.parent = NULL,
+	};
 
 	debug_atomic_inc(nr_redundant_checks);
 
-	result = __bfs_forwards(root, target, class_equal, target_entry);
+	ret = check_path(hlock_class(target), &src_entry, &target_entry);
 
-	return result;
+	if (!ret) {
+		debug_atomic_inc(nr_redundant);
+		ret = 2;
+	} else if (ret < 0)
+		ret = 0;
+
+	return ret;
 }
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -2307,9 +2364,7 @@ static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
-	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
-	struct lock_list this;
 	int ret;
 
 	if (!hlock_class(prev)->key || !hlock_class(next)->key) {
@@ -2340,25 +2395,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
 	 * in the graph whose neighbours are to be checked.
 	 */
-	this.class = hlock_class(next);
-	this.parent = NULL;
-	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
-	if (unlikely(!ret)) {
-		if (!trace->nr_entries) {
-			/*
-			 * If save_trace fails here, the printing might
-			 * trigger a WARN but because of the !nr_entries it
-			 * should not do bad things.
-			 */
-			save_trace(trace);
-		}
-		print_circular_bug(&this, target_entry, next, prev);
+	ret = check_noncircular(next, prev, trace);
+	if (unlikely(ret <= 0))
 		return 0;
-	}
-	else if (unlikely(ret < 0)) {
-		print_bfs_bug(ret);
-		return 0;
-	}
 
 	if (!check_irq_usage(curr, prev, next))
 		return 0;
@@ -2392,18 +2431,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	/*
 	 * Is the <prev> -> <next> link redundant?
 	 */
-	this.class = hlock_class(prev);
-	this.parent = NULL;
-	ret = check_redundant(&this, hlock_class(next), &target_entry);
-	if (!ret) {
-		debug_atomic_inc(nr_redundant);
-		return 2;
-	}
-	if (ret < 0) {
-		print_bfs_bug(ret);
-		return 0;
-	}
-
+	ret = check_redundant(prev, next);
+	if (ret != 1)
+		return ret;
 	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
-- 
2.30.2
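As background on the return-value convention the patch builds on:
check_path() propagates the __bfs_forwards() result, where 0 means a
path to the target class was found, 1 means the reachable subgraph was
exhausted without a hit, and a negative value signals an internal BFS
error (e.g. a full circular queue). The sketch below is plain userspace
C with an invented fixed-size adjacency matrix; none of it is lockdep
code, it only mirrors that convention and the 2/1/0 mapping done by the
new check_redundant():

#include <stdio.h>

#define MAX_NODES 8

/* Invented example graph: adj[a][b] != 0 means there is an edge a -> b. */
static const int adj[MAX_NODES][MAX_NODES] = {
	[0] = { [1] = 1, [2] = 1 },
	[1] = { [3] = 1 },
	[2] = { [3] = 1 },
};

/*
 * BFS from src, mirroring check_path()/__bfs_forwards(): return 0 if
 * target is reachable, 1 if it is not, and a negative value on internal
 * error (here: queue overflow, the analogue of lockdep's "circular
 * queue full" condition).
 */
static int check_path(int src, int target)
{
	int queue[MAX_NODES];
	int seen[MAX_NODES] = { 0 };
	int head = 0, tail = 0;

	queue[tail++] = src;
	seen[src] = 1;

	while (head < tail) {
		int cur = queue[head++];

		if (cur == target)
			return 0;		/* path found */

		for (int n = 0; n < MAX_NODES; n++) {
			if (!adj[cur][n] || seen[n])
				continue;
			if (tail >= MAX_NODES)
				return -1;	/* queue full */
			seen[n] = 1;
			queue[tail++] = n;
		}
	}

	return 1;				/* no path */
}

/* Same result mapping as the patch's new check_redundant(). */
static int check_redundant(int src, int target)
{
	int ret = check_path(src, target);

	if (!ret)
		return 2;	/* path exists: the dependency is redundant */
	if (ret < 0)
		return 0;	/* BFS error: report failure to the caller */
	return 1;		/* no path: a genuinely new dependency */
}

int main(void)
{
	printf("0 -> 3: %d (expect 2, redundant)\n", check_redundant(0, 3));
	printf("3 -> 0: %d (expect 1, new)\n", check_redundant(3, 0));
	return 0;
}

With this mapping, the single "if (ret != 1) return ret;" in
check_prev_add() covers both the redundant case (returns 2) and the
error case (returns 0), which is what lets the patch drop the separate
branches at the call site.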