	return 0;
}
-static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
+static int mlx5e_gen_vxlan_header(char buf[],
+				  const struct ip_tunnel_key *tun_key)
{
	__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
	struct udphdr *udp = (struct udphdr *)(buf);
	return 0;
}
-static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
+static int mlx5e_gen_gre_header(char buf[], const struct ip_tunnel_key *tun_key)
{
	__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
	int hdr_len;
				      struct mlx5e_encap_entry *e)
{
	int err = 0;
-	struct ip_tunnel_key *key = &e->tun_info.key;
+	const struct ip_tunnel_key *key = &e->tun_info->key;
	if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		*ip_proto = IPPROTO_UDP;
				    struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
-	struct ip_tunnel_key *tun_key = &e->tun_info.key;
+	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
	struct net_device *out_dev, *route_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
				    struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
-	struct ip_tunnel_key *tun_key = &e->tun_info.key;
+	const struct ip_tunnel_key *tun_key = &e->tun_info->key;
	struct net_device *out_dev, *route_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);
	if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
-		int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
+		int dst_port = be16_to_cpu(e->tun_info->key.tp_dst);
		if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
			NL_SET_ERR_MSG_MOD(extack,
		e->tunnel_hlen = VXLAN_HLEN;
	} else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
		e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
-		e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
+		e->tunnel_hlen = gre_calc_hlen(e->tun_info->key.tun_flags);
	} else {
		e->reformat_type = -1;
		e->tunnel_hlen = -1;
};
struct mlx5e_tc_flow_parse_attr {
-	struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
}
struct encap_key {
-	struct ip_tunnel_key *ip_tun_key;
+	const struct ip_tunnel_key *ip_tun_key;
	int tunnel_type;
};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
-	struct ip_tunnel_info *tun_info;
+	const struct ip_tunnel_info *tun_info;
	struct encap_key key, e_key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	int err = 0;
	parse_attr = attr->parse_attr;
-	tun_info = &parse_attr->tun_info[out_index];
+	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
-		e_key.ip_tun_key = &e->tun_info.key;
+		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tunnel_type = e->tunnel_type;
		if (!cmp_encap_info(&e_key, &key)) {
			found = true;
	if (!e)
		return -ENOMEM;
-	e->tun_info = *tun_info;
+	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err;
			} else if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
-				parse_attr->tun_info[attr->out_count] = *info;
+				parse_attr->tun_info[attr->out_count] = info;
				encap = false;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
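
For context, the sketch below is plain C with hypothetical type and struct names, not the driver's actual API. It only illustrates the pattern the hunks above apply: instead of embedding a private copy of the tunnel metadata in the encap entry, the entry holds a const pointer to the caller's ip_tunnel_info, which is why field declarations lose their by-value form and the ".key" accesses become "->key". The trade-off is that the pointed-to info must stay valid for as long as the entry references it.

/* Minimal sketch, not mlx5 code: hypothetical stand-ins for the kernel types. */
struct ip_tunnel_key  { unsigned long long tun_id; };
struct ip_tunnel_info { struct ip_tunnel_key key; };

/* Before the change: the entry owns a private copy of the tunnel info. */
struct encap_entry_by_value {
	struct ip_tunnel_info tun_info;		/* copy; accessed as tun_info.key */
};

/* After the change: the entry only references info owned elsewhere,
 * so that info must outlive the entry; accessed as tun_info->key. */
struct encap_entry_by_ref {
	const struct ip_tunnel_info *tun_info;
};

void encap_entry_init(struct encap_entry_by_ref *e,
		      const struct ip_tunnel_info *tun_info)
{
	e->tun_info = tun_info;			/* store the reference, no copy */
}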