net: sched: flower: refactor fl_change
author: Vlad Buslov <vladbu@mellanox.com>
Thu, 21 Mar 2019 13:17:34 +0000 (15:17 +0200)
committer: David S. Miller <davem@davemloft.net>
Thu, 21 Mar 2019 21:32:17 +0000 (14:32 -0700)
As a preparation for using classifier spinlock instead of relying on
external rtnl lock, rearrange code in fl_change. The goal is to group the
code which changes classifier state in single block in order to allow
following commits in this set to protect it from parallel modification with
tp->lock. The data structures that require tp->lock protection are the mask
hashtable, the filters list, and the classifier handle_idr.

fl_hw_replace_filter() is a sleeping function and cannot be called while
holding a spinlock. In order to execute all sequence of changes to shared
classifier data structures atomically, call fl_hw_replace_filter() before
modifying them.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/cls_flower.c

index dcf3aee5697eb909ad49b732ebb514caacba93e4..d36ceb5001f9c98574850f2fa10183e2283a078c 100644 (file)
@@ -1376,73 +1376,75 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        if (err)
                goto errout;
 
-       if (!handle) {
-               handle = 1;
-               err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                   INT_MAX, GFP_KERNEL);
-       } else if (!fold) {
-               /* user specifies a handle and it doesn't exist */
-               err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-                                   handle, GFP_KERNEL);
-       }
-       if (err)
-               goto errout_mask;
-       fnew->handle = handle;
-
-       if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
-               err = -EEXIST;
-               goto errout_idr;
-       }
-
-       err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-                                    fnew->mask->filter_ht_params);
-       if (err)
-               goto errout_idr;
-
        if (!tc_skip_hw(fnew->flags)) {
                err = fl_hw_replace_filter(tp, fnew, extack);
                if (err)
-                       goto errout_mask_ht;
+                       goto errout_mask;
        }
 
        if (!tc_in_hw(fnew->flags))
                fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
        if (fold) {
+               fnew->handle = handle;
+
+               err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+                                            fnew->mask->filter_ht_params);
+               if (err)
+                       goto errout_hw;
+
                rhashtable_remove_fast(&fold->mask->ht,
                                       &fold->ht_node,
                                       fold->mask->filter_ht_params);
-               if (!tc_skip_hw(fold->flags))
-                       fl_hw_destroy_filter(tp, fold, NULL);
-       }
-
-       *arg = fnew;
-
-       if (fold) {
                idr_replace(&head->handle_idr, fnew, fnew->handle);
                list_replace_rcu(&fold->list, &fnew->list);
+
+               if (!tc_skip_hw(fold->flags))
+                       fl_hw_destroy_filter(tp, fold, NULL);
                tcf_unbind_filter(tp, &fold->res);
                tcf_exts_get_net(&fold->exts);
                tcf_queue_work(&fold->rwork, fl_destroy_filter_work);
        } else {
+               if (__fl_lookup(fnew->mask, &fnew->mkey)) {
+                       err = -EEXIST;
+                       goto errout_hw;
+               }
+
+               if (handle) {
+                       /* user specifies a handle and it doesn't exist */
+                       err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                           handle, GFP_ATOMIC);
+               } else {
+                       handle = 1;
+                       err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+                                           INT_MAX, GFP_ATOMIC);
+               }
+               if (err)
+                       goto errout_hw;
+
+               fnew->handle = handle;
+
+               err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+                                            fnew->mask->filter_ht_params);
+               if (err)
+                       goto errout_idr;
+
                list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
        }
 
+       *arg = fnew;
+
        kfree(tb);
        kfree(mask);
        return 0;
 
-errout_mask_ht:
-       rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
-                              fnew->mask->filter_ht_params);
-
 errout_idr:
-       if (!fold)
-               idr_remove(&head->handle_idr, fnew->handle);
-
+       idr_remove(&head->handle_idr, fnew->handle);
+errout_hw:
+       if (!tc_skip_hw(fnew->flags))
+               fl_hw_destroy_filter(tp, fnew, NULL);
 errout_mask:
        fl_mask_put(head, fnew->mask, false);
-
 errout:
        tcf_exts_destroy(&fnew->exts);
        kfree(fnew);