#define MLX5E_TC_MAX_SPLITS 1
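+/* Tunnel types classified from the filter/tunnel net device; anything
+ * other than VXLAN is treated as unknown and rejected for offload.
+ */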
+enum {
+ MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
+ MLX5E_TC_TUNNEL_TYPE_VXLAN
+};
+
+static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev);
+
struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
};
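+/* Return the rtnl link kind string of a net device (e.g. "vxlan"), or an
+ * empty string when the device has no rtnl_link_ops; used in warnings.
+ */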
+static const char *mlx5e_netdev_kind(struct net_device *dev)
+{
+ if (dev->rtnl_link_ops)
+ return dev->rtnl_link_ops->kind;
+ else
+ return "";
+}
+
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
e->encap_size, e->encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);
+ int tunnel_type;
+ int err = 0;
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- int err = 0;
-
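+ /* Branch on the tunnel type classified from the filter device rather
+  * than inferring it from the UDP destination port match.
+  */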
+ tunnel_type = mlx5e_get_tunnel_type(filter_dev);
+ if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
err = parse_tunnel_vxlan_attr(priv, spec, f,
headers_c, headers_v);
- if (err)
- goto vxlan_match_offload_err;
- } else { /* udp dst port must be given */
-vxlan_match_offload_err:
+ } else {
NL_SET_ERR_MSG_MOD(extack,
- "IP tunnel decap offload supported only for vxlan, must set UDP dport");
+ "decapsulation offload is not supported");
netdev_warn(priv->netdev,
- "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+ "decapsulation offload is not supported for %s net device (%d)\n",
+ mlx5e_netdev_kind(filter_dev), tunnel_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "failed to parse tunnel attributes");
+ netdev_warn(priv->netdev,
+ "failed to parse %s tunnel attributes (%d)\n",
+ mlx5e_netdev_kind(filter_dev), tunnel_type);
return -EOPNOTSUPP;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev,
u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- if (parse_tunnel_attr(priv, spec, f))
+ if (parse_tunnel_attr(priv, spec, f, filter_dev))
return -EOPNOTSUPP;
break;
default:
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, &match_level);
+ err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
}
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
- struct ip_tunnel_key *tun_key)
+ struct mlx5e_encap_entry *e)
{
- *ip_proto = IPPROTO_UDP;
- return mlx5e_gen_vxlan_header(buf, tun_key);
+ struct ip_tunnel_key *key = &e->tun_info.key;
+ int err = 0;
+
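+ /* Only VXLAN header generation is supported; other tunnel types fail
+  * with -EOPNOTSUPP.
+  */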
+ if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ *ip_proto = IPPROTO_UDP;
+ err = mlx5e_gen_vxlan_header(buf, key);
+ } else {
+ pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
+ e->tunnel_type);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
}
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv4_encap_size = ETH_HLEN +
sizeof(struct iphdr) +
- VXLAN_HLEN;
+ e->tunnel_hlen;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
struct net_device *out_dev;
struct neighbour *n = NULL;
/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
- &ip->protocol, tun_key);
+ &ip->protocol, e);
if (err)
goto destroy_neigh_entry;
goto out;
}
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
ipv4_encap_size, encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv6_encap_size = ETH_HLEN +
sizeof(struct ipv6hdr) +
- VXLAN_HLEN;
+ e->tunnel_hlen;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
struct net_device *out_dev;
struct neighbour *n = NULL;
/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
- &ip6h->nexthdr, tun_key);
+ &ip6h->nexthdr, e);
if (err)
goto destroy_neigh_entry;
goto out;
}
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
ipv6_encap_size, encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
return err;
}
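+/* Classify a tunnel net device into one of the MLX5E_TC_TUNNEL_TYPE_*
+ * values; only VXLAN devices are currently recognized.
+ */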
+static int mlx5e_get_tunnel_type(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return MLX5E_TC_TUNNEL_TYPE_VXLAN;
+ else
+ return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
+}
+
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
{
- if (netif_is_vxlan(netdev) &&
+ int tunnel_type = mlx5e_get_tunnel_type(netdev);
+
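+ /* Offload requires both a recognized tunnel type and the matching
+  * device capability (vxlan_encap_decap for VXLAN).
+  */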
+ if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
return true;
+ else
+ return false;
+}
- return false;
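+/* Validate the tunnel device and fill the tunnel-specific encap
+ * attributes: the HW reformat type and the tunnel header length.
+ */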
+static int mlx5e_init_tunnel_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel_type = mlx5e_get_tunnel_type(tunnel_dev);
+
+ if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vxlan udp dport was not registered with the HW");
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n",
+ dst_port);
+ return -EOPNOTSUPP;
+ }
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ e->tunnel_hlen = VXLAN_HLEN;
+ } else {
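+ /* Unknown tunnel type: invalidate the attributes and reject. */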
+ e->reformat_type = -1;
+ e->tunnel_hlen = -1;
+ return -EOPNOTSUPP;
+ }
+ return 0;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
- int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;
-
- if (!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vxlan HW offloading is not supported");
- netdev_warn(priv->netdev,
- "vxlan HW offloading is not supported\n");
- return -EOPNOTSUPP;
- }
-
- if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "port isn't an offloaded vxlan udp dport");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
- return -EOPNOTSUPP;
- }
+ int err = 0;
hash_key = hash_encap_info(key);
return -ENOMEM;
e->tun_info = *tun_info;
- e->tunnel_type = tunnel_type;
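+ /* Derive tunnel type, reformat type and header length for this encap
+  * entry from the egress (mirred) device.
+  */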
+ err = mlx5e_init_tunnel_attr(mirred_dev, priv, e, extack);
+ if (err)
+ goto out_err;
+
INIT_LIST_HEAD(&e->flows);
if (family == AF_INET)
goto out;
parse_attr->filter_dev = filter_dev;
flow->esw_attr->parse_attr = parse_attr;
- err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f);
+ err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
+ f, filter_dev);
if (err)
goto err_free;
goto out;
parse_attr->filter_dev = filter_dev;
- err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f);
+ err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
+ f, filter_dev);
if (err)
goto err_free;