rcu: Convert ->rcu_iw_gpnum to ->gp_seq
author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	Sat, 28 Apr 2018 21:15:40 +0000 (14:15 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	Thu, 12 Jul 2018 21:27:53 +0000 (14:27 -0700)
This commit switches the interrupt-disabled detection mechanism to
->gp_seq.  This mechanism is used as part of RCU CPU stall warnings,
and detects cases where the stall is due to a CPU having interrupts
disabled.
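
As a rough illustration, here is a user-space sketch of the idea (not the
kernel code; struct iw_state, iw_handler(), and should_queue_iw() are
invented names for this example).  When a CPU has gone too long without
reporting a quiescent state, an irq_work is queued on it; if the handler
ever runs, interrupts cannot have been disabled the whole time, and the
handler records the grace-period sequence number it saw so that at most
one such probe is queued per grace period:

	#include <stdbool.h>

	struct iw_state {
		bool pending;		/* probe queued but handler not yet run */
		unsigned long gp_seq;	/* sequence number seen by the handler */
	};

	/* Runs in the irq_work handler: running at all proves irqs are enabled. */
	static void iw_handler(struct iw_state *iw, unsigned long rnp_gp_seq)
	{
		iw->gp_seq = rnp_gp_seq;	/* record progress for this grace period */
		iw->pending = false;
	}

	/* Decide whether to queue another probe for a CPU holding up the GP. */
	static bool should_queue_iw(const struct iw_state *iw, unsigned long rnp_gp_seq)
	{
		return !iw->pending && iw->gp_seq != rnp_gp_seq;
	}

The "has the handler run during this grace period?" comparison is the
field that this patch renames from ->rcu_iw_gpnum to ->rcu_iw_gp_seq and
switches to the ->gp_seq numbering.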

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 56445f4c09a80efe5f024e1ddf5dcb939f1c64ad..2ddbd1cfb31a72941774e73855f36d71bcb338a9 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1099,8 +1099,8 @@ static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
        if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
                         rnp->gp_seq))
                WRITE_ONCE(rdp->gpwrap, true);
-       if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
-               rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+       if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+               rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 }
 
 /*
@@ -1134,7 +1134,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-               rdp->rcu_iw_gpnum = rnp->gpnum;
+               rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
@@ -1231,11 +1231,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
-                   !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+                   !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
                    (rnp->ffmask & rdp->grpmask)) {
                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
                        rdp->rcu_iw_pending = true;
-                       rdp->rcu_iw_gpnum = rnp->gpnum;
+                       rdp->rcu_iw_gp_seq = rnp->gp_seq;
                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
                }
        }
@@ -3575,7 +3575,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        rdp->rcu_iw_pending = false;
-       rdp->rcu_iw_gpnum = rnp->gpnum - 1;
+       rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 50a28d1cf5a1b277363aa468d3c4551d56b587d1..6d6cbc8b3a9c71ebca3549a98609663105c8e88d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -286,7 +286,7 @@ struct rcu_data {
        /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
        struct irq_work rcu_iw;         /* Check for non-irq activity. */
        bool rcu_iw_pending;            /* Is ->rcu_iw pending? */
-       unsigned long rcu_iw_gpnum;     /* ->gpnum associated with ->rcu_iw. */
+       unsigned long rcu_iw_gp_seq;    /* ->gp_seq associated with ->rcu_iw. */
 
        int cpu;
        struct rcu_state *rsp;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 5b10904669c57b425ef77e25d7215df354dcc3aa..bc32e1f434a6f50358ee05a36cfcc5ff9d6ec796 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1763,7 +1763,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
                ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
+       delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
        pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],