struct mlx5_eswitch *esw;
const struct net_device *netdev;
struct idr fte_ids;
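+ /* maps each allocated tuple id to its offloaded zone rule */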
+ struct idr tuple_ids;
struct rhashtable zone_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
struct mlx5_ct_zone_rule {
struct mlx5_flow_handle *rule;
struct mlx5_esw_flow_attr attr;
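+ /* unique id written to reg_c_1; looked up on miss to find this rule */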
+ int tupleid;
bool nat;
};
struct mlx5_fc *counter;
unsigned long lastuse;
unsigned long cookie;
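+ /* ct cookie handed to tcf_ct_flow_table_restore_skb() on miss */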
+ unsigned long restore_cookie;
struct mlx5_ct_zone_rule zone_rules[2];
};
mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
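+ /* return the tuple id to the pool now that the rule is gone */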
+ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
}
static int
mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
u8 ct_state,
u32 mark,
- u32 label)
+ u32 label,
+ u32 tupleid)
{
struct mlx5_eswitch *esw = ct_priv->esw;
int err;
if (err)
return err;
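+ /* also write the tuple id to reg_c_1 so it survives a table miss */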
+ err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ TUPLEID_TO_REG, tupleid);
+ if (err)
+ return err;
+
return 0;
}
static int
mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_esw_flow_attr *attr,
struct flow_rule *flow_rule,
+ u32 tupleid,
bool nat)
{
struct mlx5e_tc_mod_hdr_acts mod_acts = {};
(MLX5_CT_STATE_ESTABLISHED_BIT |
MLX5_CT_STATE_TRK_BIT),
meta->ct_metadata.mark,
- meta->ct_metadata.labels[0]);
+ meta->ct_metadata.labels[0],
+ tupleid);
if (err)
goto err_mapping;
struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
struct mlx5_eswitch *esw = ct_priv->esw;
struct mlx5_flow_spec spec = {};
+ u32 tupleid = 1;
int err;
zone_rule->nat = nat;
- err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, nat);
+ /* Allocate a unique tuple id; 0 is kept as the "no tuple" sentinel */
+ err = idr_alloc_u32(&ct_priv->tuple_ids, zone_rule, &tupleid,
+ TUPLE_ID_MAX, GFP_KERNEL);
if (err) {
- ct_dbg("Failed to create ct entry mod hdr");
+ netdev_warn(ct_priv->netdev,
+ "Failed to allocate tuple id, err: %d\n", err);
return err;
}
+ zone_rule->tupleid = tupleid;
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
+ tupleid, nat);
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
err_rule:
mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+err_mod_hdr:
+ idr_remove(&ct_priv->tuple_ids, zone_rule->tupleid);
return err;
}
entry->zone = ft->zone;
entry->flow_rule = flow_rule;
entry->cookie = flow->cookie;
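+ /* stash the ct metadata cookie so the skb can be restored later */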
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
if (err)
}
idr_init(&ct_priv->fte_ids);
+ idr_init(&ct_priv->tuple_ids);
mutex_init(&ct_priv->control_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
+ idr_destroy(&ct_priv->tuple_ids);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
uplink_priv->ct_priv = NULL;
}
+
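+/* Restore conntrack state to an skb using the tuple id found in
+ * reg_c_1 after a miss. Returns false when a nonzero id no longer
+ * maps to an offloaded tuple, so the caller can drop the skb.
+ */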
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
+ struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_ct_zone_rule *zone_rule;
+ struct mlx5_ct_entry *entry;
+
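+ /* no CT offload in use, or no tuple id set: nothing to restore */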
+ if (!ct_priv || !tupleid)
+ return true;
+
+ zone_rule = idr_find(&ct_priv->tuple_ids, tupleid);
+ if (!zone_rule)
+ return false;
+
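+ /* the zone rule is embedded in its ct entry, indexed by nat */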
+ entry = container_of(zone_rule, struct mlx5_ct_entry,
+ zone_rules[zone_rule->nat]);
+ tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+
+ return true;
+}
misc_parameters_2.metadata_reg_c_5),\
}
+#define tupleid_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
+ .moffset = 0,\
+ .mlen = 3,\
+ .soffset = MLX5_BYTE_OFF(fte_match_param,\
+ misc_parameters_2.metadata_reg_c_1),\
+}
+
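+/* mlen is in bytes: 3 bytes of reg_c_1 give TUPLE_ID_BITS = 24, so
+ * TUPLE_ID_MAX = 0xffffff and ids 1..0xffffff are allocatable (id 0
+ * is kept as the "no tuple" sentinel).
+ */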
+#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
+#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
int
struct mlx5e_tc_flow *flow,
struct mlx5_esw_flow_attr *attr);
+bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid);
+
#else /* CONFIG_MLX5_TC_CT */
static inline int
{
}
+static inline bool
+mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+ struct sk_buff *skb, u32 tupleid)
+{
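+ /* with CT compiled out, only a zero (unset) tuple id is acceptable */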
+ if (!tupleid)
+ return true;
+
+ return false;
+}
+
#endif /* !IS_ENABLED(CONFIG_MLX5_TC_CT) */
#endif /* __MLX5_EN_TC_CT_H__ */
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
+ [TUPLEID_TO_REG] = tupleid_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_update_priv *tc_priv)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, reg_c0, reg_c1, tunnel_id;
+ u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
struct tc_skb_ext *tc_skb_ext;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
}
tc_skb_ext->chain = chain;
+
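+ /* the low TUPLE_ID_BITS of reg_c_1 carry the tuple id */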
+ tuple_id = reg_c1 & TUPLE_ID_MAX;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+ return false;
}
tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;