[NETFILTER]: nf_conntrack: fix smp_processor_id() in preemptible code warning
author     Patrick McHardy <kaber@trash.net>
           Wed, 27 Feb 2008 20:07:47 +0000 (12:07 -0800)
committer  David S. Miller <davem@davemloft.net>
           Wed, 27 Feb 2008 20:07:47 +0000 (12:07 -0800)
Since we're using RCU for the conntrack hash now, we need to make sure
we aren't preempted or interrupted by BHs while updating the per-CPU stats.

Fixes warning reported by Tilman Schmidt <tilman@imap.cc> when using
preemptible RCU:

[   48.180297] BUG: using smp_processor_id() in preemptible [00000000] code: ntpdate/3562
[   48.180297] caller is __nf_conntrack_find+0x9b/0xeb [nf_conntrack]
[   48.180297] Pid: 3562, comm: ntpdate Not tainted 2.6.25-rc2-mm1-testing #1
[   48.180297]  [<c02015b9>] debug_smp_processor_id+0x99/0xb0
[   48.180297]  [<fac643a7>] __nf_conntrack_find+0x9b/0xeb [nf_conntrack]
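
For context, the NF_CT_STAT_INC() calls in these lookup paths bump per-CPU
counters. A rough sketch of what the macro expands to in this tree (an
approximation, not the exact header):

    /* Per-CPU conntrack statistics: __get_cpu_var() resolves the counter
     * for the current CPU via smp_processor_id(), which CONFIG_DEBUG_PREEMPT
     * flags whenever preemption is still enabled.
     */
    #define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)

With preemptible RCU, rcu_read_lock() no longer disables preemption, so the
RCU-protected lookups trip this check unless BHs (and with them preemption)
are disabled around the stat updates.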

Tested-by: Tilman Schmidt <tilman@imap.cc>
Tested-by: Christian Casteyde <casteyde.christian@free.fr> [Bugzilla #10097]
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/netfilter/nf_conntrack_core.c

index 327e847d2702d8e647ca656b5e89db6ba2ca74fd..b77eb56a87e33a7ec85bd351e5b6d512922ebbc3 100644
@@ -256,13 +256,19 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);
 
+       /* Disable BHs the entire time since we normally need to disable them
+        * at least once for the stats anyway.
+        */
+       local_bh_disable();
        hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
+                       local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }
+       local_bh_enable();
 
        return NULL;
 }
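
__nf_conntrack_find() already runs inside its callers' RCU read-side critical
section, so the hunk above only adds BH disabling, which is also enough to
keep the task on one CPU for the per-CPU counters. A typical call site looks
roughly like this (simplified sketch of nf_conntrack_find_get() in this tree):

    rcu_read_lock();
    h = __nf_conntrack_find(tuple);
    /* ... take a reference on the conntrack if a live entry was found ... */
    rcu_read_unlock();
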
@@ -400,17 +406,20 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
        struct hlist_node *n;
        unsigned int hash = hash_conntrack(tuple);
 
-       rcu_read_lock();
+       /* Disable BHs the entire time since we need to disable them at
+        * least once for the stats anyway.
+        */
+       rcu_read_lock_bh();
        hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple)) {
                        NF_CT_STAT_INC(found);
-                       rcu_read_unlock();
+                       rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(searched);
        }
-       rcu_read_unlock();
+       rcu_read_unlock_bh();
 
        return 0;
 }
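
nf_conntrack_tuple_taken() takes the RCU read lock itself, so this hunk folds
both operations into rcu_read_lock_bh()/rcu_read_unlock_bh(), which mark the
RCU read-side critical section and disable bottom halves in one call. Under
preemptible RCU the pair expands to roughly (an approximation, not the exact
rcupdate headers):

    #define rcu_read_lock_bh()    do { rcu_read_lock(); local_bh_disable(); } while (0)
    #define rcu_read_unlock_bh()  do { local_bh_enable(); rcu_read_unlock(); } while (0)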