net/mlx5e: CT: Handle misses after executing CT action
Author: Paul Blakey <paulb@mellanox.com>
Thu, 12 Mar 2020 10:23:16 +0000 (12:23 +0200)
Committer: David S. Miller <davem@davemloft.net>
Thu, 12 Mar 2020 22:00:39 +0000 (15:00 -0700)
Mark packets with a unique tupleid, and on miss use that id to get
the act ct restore_cookie. Using that restore cookie, we ask CT to
restore the relevant info on the SKB.

Signed-off-by: Paul Blakey <paulb@mellanox.com>
Reviewed-by: Oz Shlomo <ozsh@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h

index e9826e379aca2c0e3b74dbd0624c8db681fa23d1..c75dc97fd3a7670c11ef5448813c8b0c4f5af788 100644 (file)
@@ -35,6 +35,7 @@ struct mlx5_tc_ct_priv {
        struct mlx5_eswitch *esw;
        const struct net_device *netdev;
        struct idr fte_ids;
+       struct idr tuple_ids;
        struct rhashtable zone_ht;
        struct mlx5_flow_table *ct;
        struct mlx5_flow_table *ct_nat;
@@ -55,6 +56,7 @@ struct mlx5_ct_flow {
 struct mlx5_ct_zone_rule {
        struct mlx5_flow_handle *rule;
        struct mlx5_esw_flow_attr attr;
+       int tupleid;
        bool nat;
 };
 
@@ -76,6 +78,7 @@ struct mlx5_ct_entry {
        struct mlx5_fc *counter;
        unsigned long lastuse;
        unsigned long cookie;
+       unsigned long restore_cookie;
        struct mlx5_ct_zone_rule zone_rules[2];
 };
 
@@ -237,6 +240,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 
        mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
        mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+       idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
 }
 
 static void
@@ -269,7 +273,8 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
                               struct mlx5e_tc_mod_hdr_acts *mod_acts,
                               u8 ct_state,
                               u32 mark,
-                              u32 label)
+                              u32 label,
+                              u32 tupleid)
 {
        struct mlx5_eswitch *esw = ct_priv->esw;
        int err;
@@ -289,6 +294,11 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
        if (err)
                return err;
 
+       err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+                                       TUPLEID_TO_REG, tupleid);
+       if (err)
+               return err;
+
        return 0;
 }
 
@@ -412,6 +422,7 @@ static int
 mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
                                struct mlx5_esw_flow_attr *attr,
                                struct flow_rule *flow_rule,
+                               u32 tupleid,
                                bool nat)
 {
        struct mlx5e_tc_mod_hdr_acts mod_acts = {};
@@ -442,7 +453,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
                                             (MLX5_CT_STATE_ESTABLISHED_BIT |
                                              MLX5_CT_STATE_TRK_BIT),
                                             meta->ct_metadata.mark,
-                                            meta->ct_metadata.labels[0]);
+                                            meta->ct_metadata.labels[0],
+                                            tupleid);
        if (err)
                goto err_mapping;
 
@@ -473,15 +485,27 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
        struct mlx5_eswitch *esw = ct_priv->esw;
        struct mlx5_flow_spec spec = {};
+       u32 tupleid = 1;
        int err;
 
        zone_rule->nat = nat;
 
-       err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, nat);
+       /* Get tuple unique id */
+       err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
+                           TUPLE_ID_MAX, GFP_KERNEL);
        if (err) {
-               ct_dbg("Failed to create ct entry mod hdr");
+               netdev_warn(ct_priv->netdev,
+                           "Failed to allocate tuple id, err: %d\n", err);
                return err;
        }
+       zone_rule->tupleid = tupleid;
+
+       err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
+                                             tupleid, nat);
+       if (err) {
+               ct_dbg("Failed to create ct entry mod hdr");
+               goto err_mod_hdr;
+       }
 
        attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
                       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
@@ -511,6 +535,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 
 err_rule:
        mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+err_mod_hdr:
+       idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
        return err;
 }
 
@@ -573,6 +599,7 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
        entry->zone = ft->zone;
        entry->flow_rule = flow_rule;
        entry->cookie = flow->cookie;
+       entry->restore_cookie = meta_action->ct_metadata.cookie;
 
        err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
        if (err)
@@ -1188,6 +1215,7 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
        }
 
        idr_init(&ct_priv->fte_ids);
+       idr_init(&ct_priv->tuple_ids);
        mutex_init(&ct_priv->control_lock);
        rhashtable_init(&ct_priv->zone_ht, &zone_params);
 
@@ -1222,8 +1250,31 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
 
        rhashtable_destroy(&ct_priv->zone_ht);
        mutex_destroy(&ct_priv->control_lock);
+       idr_destroy(&ct_priv->tuple_ids);
        idr_destroy(&ct_priv->fte_ids);
        kfree(ct_priv);
 
        uplink_priv->ct_priv = NULL;
 }
+
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+                        struct sk_buff *skb, u32 tupleid)
+{
+       struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+       struct mlx5_ct_zone_rule *zone_rule;
+       struct mlx5_ct_entry *entry;
+
+       if (!ct_priv || !tupleid)
+               return true;
+
+       zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
+       if (!zone_rule)
+               return false;
+
+       entry = container_of(zone_rule, struct mlx5_ct_entry,
+                            zone_rules[zone_rule->nat]);
+       tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+
+       return true;
+}
index f4bfda77f01a06ebbfd9f10e4a562aa2f4e36db0..464c86595309053dfcb2b5e4edb163c70fce3795 100644 (file)
@@ -64,6 +64,17 @@ struct mlx5_ct_attr {
                                 misc_parameters_2.metadata_reg_c_5),\
 }
 
+#define tupleid_to_reg_ct {\
+       .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
+       .moffset = 0,\
+       .mlen = 3,\
+       .soffset = MLX5_BYTE_OFF(fte_match_param,\
+                                misc_parameters_2.metadata_reg_c_1),\
+}
+
+#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
+#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+
 #if IS_ENABLED(CONFIG_MLX5_TC_CT)
 
 int
@@ -92,6 +103,10 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
                       struct mlx5e_tc_flow *flow,
                       struct mlx5_esw_flow_attr *attr);
 
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+                        struct sk_buff *skb, u32 tupleid);
+
 #else /* CONFIG_MLX5_TC_CT */
 
 static inline int
@@ -139,5 +154,15 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
 {
 }
 
+static inline bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+                        struct sk_buff *skb, u32 tupleid)
+{
+       if (!tupleid)
+               return true;
+
+       return false;
+}
+
 #endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
 #endif /* __MLX5_EN_TC_CT_H__ */
index 1f6a30623cb7d79ed97f1bc9a7cd2dbf2bdc51ba..5497d5e41cabe40b0086718c7dc9c02121f89c6b 100644 (file)
@@ -200,6 +200,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
        [MARK_TO_REG] = mark_to_reg_ct,
        [LABELS_TO_REG] = labels_to_reg_ct,
        [FTEID_TO_REG] = fteid_to_reg_ct,
+       [TUPLEID_TO_REG] = tupleid_to_reg_ct,
 };
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
@@ -4851,7 +4852,9 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
                             struct mlx5e_tc_update_priv *tc_priv)
 {
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-       u32 chain = 0, reg_c0, reg_c1, tunnel_id;
+       u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+       struct mlx5_rep_uplink_priv *uplink_priv;
+       struct mlx5e_rep_priv *uplink_rpriv;
        struct tc_skb_ext *tc_skb_ext;
        struct mlx5_eswitch *esw;
        struct mlx5e_priv *priv;
@@ -4885,6 +4888,13 @@ bool mlx5e_tc_rep_update_skb(struct mlx5_cqe64 *cqe,
                }
 
                tc_skb_ext->chain = chain;
+
+               tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+               uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+               uplink_priv = &uplink_rpriv->uplink_priv;
+               if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+                       return false;
        }
 
        tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
index 31c9e81b9287786e197b28557d3921e9edec9fe6..abdcfa4c4e0e3a6684ce786fe76852af312bd5a3 100644 (file)
@@ -99,6 +99,7 @@ enum mlx5e_tc_attr_to_reg {
        MARK_TO_REG,
        LABELS_TO_REG,
        FTEID_TO_REG,
+       TUPLEID_TO_REG,
 };
 
 struct mlx5e_tc_attr_to_reg_mapping {