rcu: Fix and comment ordering around wait_event()
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 24 Sep 2013 22:04:06 +0000 (15:04 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 3 Dec 2013 18:10:18 +0000 (10:10 -0800)

It is all too easy to forget that wait_event() does not necessarily
imply a full memory barrier.  The case where it does not is where the
condition transitions to true just as wait_event() starts execution.
This is actually a feature: The standard use of wait_event() involves
locking, in which case the locks provide the needed ordering (you hold a
lock across the wake_up() and acquire that same lock after wait_event()
returns).
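
To see why, note that wait_event() first checks the condition and, if
it is already true, returns immediately without acquiring any lock.  A
simplified rendition of the macro's fast path (paraphrased, not the
exact kernel definition):

	#define wait_event(wq, condition)				\
	do {								\
		if (condition)						\
			break;	/* Fast path: no lock, no barrier. */	\
		__wait_event(wq, condition);	/* Slow path sleeps. */	\
	} while (0)

And a minimal sketch of the locked usage pattern described above, using
the usual <linux/wait.h> and <linux/spinlock.h> helpers (the lock, wait
queue, and flag names are illustrative only):

	static DEFINE_SPINLOCK(mylock);
	static DECLARE_WAIT_QUEUE_HEAD(mywq);
	static bool myflag;

	void waker(void)
	{
		spin_lock(&mylock);
		myflag = true;		/* Update condition under the lock. */
		wake_up(&mywq);		/* Wake up while still holding it. */
		spin_unlock(&mylock);
	}

	void waiter(void)
	{
		wait_event(mywq, myflag);
		spin_lock(&mylock);	/* Acquiring the waker's lock orders */
		spin_unlock(&mylock);	/* the myflag store before later code. */
		/* Accesses here are ordered after the waker's critical section. */
	}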

Given that I did forget this in one case, this commit fixes that case.
It also adds comments calling out the placement of the existing memory
barriers relied on by the other wait_event() calls.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/torture.c
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
index 3929cd451511ef18b03c33d33a34d9b263151153..69a4ec80a788b9abda2c26bc325bd4fefcf23d03 100644
--- a/kernel/rcu/torture.c
+++ b/kernel/rcu/torture.c
@@ -1578,6 +1578,7 @@ static int rcu_torture_barrier_cbs(void *arg)
 {
        long myid = (long)arg;
        bool lastphase = 0;
+       bool newphase;
        struct rcu_head rcu;
 
        init_rcu_head_on_stack(&rcu);
@@ -1585,10 +1586,11 @@ static int rcu_torture_barrier_cbs(void *arg)
        set_user_nice(current, 19);
        do {
                wait_event(barrier_cbs_wq[myid],
-                          barrier_phase != lastphase ||
+                          (newphase =
+                           ACCESS_ONCE(barrier_phase)) != lastphase ||
                           kthread_should_stop() ||
                           fullstop != FULLSTOP_DONTSTOP);
-               lastphase = barrier_phase;
+               lastphase = newphase;
                smp_mb(); /* ensure barrier_phase load before ->call(). */
                if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
                        break;
@@ -1625,7 +1627,7 @@ static int rcu_torture_barrier(void *arg)
                if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
                        break;
                n_barrier_attempts++;
-               cur_ops->cb_barrier();
+               cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
                if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
                        n_rcu_torture_barrier_error++;
                        WARN_ON_ONCE(1);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 5243ebea0fc1eeabd9ca70cb7e0786541e5db2f3..abef9c358d47a45f2a0ba8edd5878c68ba8a385c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1533,6 +1533,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        __note_gp_changes(rsp, rnp, rdp);
+               /* smp_mb() provided by prior unlock-lock pair. */
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched();
@@ -1577,6 +1578,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        wait_event_interruptible(rsp->gp_wq,
                                                 ACCESS_ONCE(rsp->gp_flags) &
                                                 RCU_GP_FLAG_INIT);
+                       /* Locking provides needed memory barrier. */
                        if (rcu_gp_init(rsp))
                                break;
                        cond_resched();
@@ -1606,6 +1608,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                        (!ACCESS_ONCE(rnp->qsmask) &&
                                         !rcu_preempt_blocked_readers_cgp(rnp)),
                                        j);
+                       /* Locking provides needed memory barriers. */
                        /* If grace period done, leave loop. */
                        if (!ACCESS_ONCE(rnp->qsmask) &&
                            !rcu_preempt_blocked_readers_cgp(rnp))
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6abb03dff5c053f44ef5dbc28f7bf754e5669e78..b023e5407111ca7b678e096d02e25a0dd966521e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -779,8 +779,10 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                       if (wake)
+                       if (wake) {
+                               smp_mb(); /* EGP done before wake_up(). */
                                wake_up(&sync_rcu_preempt_exp_wq);
+                       }
                        break;
                }
                mask = rnp->grpmask;
@@ -1852,6 +1854,7 @@ static int rcu_oom_notify(struct notifier_block *self,
 
        /* Wait for callbacks from earlier instance to complete. */
        wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+       smp_mb(); /* Ensure callback reuse happens after callback invocation. */
 
        /*
         * Prevent premature wakeup: ensure that all increments happen
@@ -2250,6 +2253,7 @@ static int rcu_nocb_kthread(void *arg)
                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
                                            TPS("Sleep"));
                        wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
+                       /* Memory barrier provided by xchg() below. */
                } else if (firsttime) {
                        firsttime = 0;
                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,