return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct in6_addr *gw;
int ifindex, weight;
- ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
- weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
- gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
+ ifindex = fib6_nh->fib_nh_dev->ifindex;
+ weight = fib6_nh->fib_nh_weight;
+ gw = &fib6_nh->fib_nh_gw6;
if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
weight))
return false;
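Reviewer note: the rename is mechanical across every hunk that follows; collected here for reference is the mapping as it appears in this excerpt (a summary only, not the full struct fib6_nh definition):

/* old fib6_nh field      new name
 *   nh_dev            -> fib_nh_dev
 *   nh_gw             -> fib_nh_gw6
 *   nh_weight         -> fib_nh_weight
 *   nh_flags          -> fib_nh_flags
 *   nh_lwtstate       -> fib_nh_lws
 *   nh_upper_bound    -> fib_nh_upper_bound
 */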
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev;
val ^= dev->ifindex;
}
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->fib6_nh.nh_gw))
+ &rt->fib6_nh.fib_nh_gw6))
return nh;
continue;
}
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct mlxsw_sp_nexthop *nh;
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
if (nh && nh->offloaded)
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
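Reviewer note: these three hunks keep the offload-marking policy intact while adopting the new names: LOCAL and BLACKHOLE entries flag only the first route on rt6_list, multipath entries flag exactly the routes whose nexthop made it into hardware, and the unset path clears the flag on every sibling. A minimal sketch of the per-route decision, with is_offloaded standing in for the nh && nh->offloaded test above:

static void fib6_nh_set_offload(struct fib6_nh *fib6_nh, bool is_offloaded)
{
	if (is_offloaded)
		fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
	else
		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}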
const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->fib6_nh.nh_dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
+ return rt->fib6_nh.fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
struct mlxsw_sp_rif *rif;
int err;
struct mlxsw_sp_nexthop *nh,
const struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->fib6_nh.nh_weight;
- memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh.fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
if (!fl6->mp_hash)
fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
- if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
+ if (fl6->mp_hash <= atomic_read(&match->fib6_nh.fib_nh_upper_bound))
return match;
list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
fib6_siblings) {
int nh_upper_bound;
- nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
+ nh_upper_bound = atomic_read(&sibling->fib6_nh.fib_nh_upper_bound);
if (fl6->mp_hash > nh_upper_bound)
continue;
if (rt6_score_route(sibling, oif, strict) < 0)
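Reviewer note: the two hunks above are the hash-threshold multipath walk: the flow hash is computed once, then the first route and its siblings are scanned in order until a live nexthop's fib_nh_upper_bound covers the hash. A minimal userspace sketch of that selection (illustrative signature, not kernel API); kernel flow hashes fit in 31 bits, so plain ints suffice here:

/* bound[] stands in for atomic_read(&...fib6_nh.fib_nh_upper_bound);
 * a dead sibling keeps bound -1 and is never chosen here. */
static int select_path(const int *bound, int n, int mp_hash)
{
	int i;

	for (i = 0; i < n; i++)
		if (mp_hash <= bound[i])
			return i;	/* first region covering the hash */
	return n - 1;			/* only reached if every path is dead */
}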
struct fib6_info *sprt;
if (!oif && ipv6_addr_any(saddr) &&
- !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
+ !(rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD))
return rt;
for (sprt = rt; sprt; sprt = rcu_dereference(sprt->fib6_next)) {
- const struct net_device *dev = sprt->fib6_nh.nh_dev;
+ const struct net_device *dev = sprt->fib6_nh.fib_nh_dev;
- if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
+ if (sprt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
continue;
if (oif) {
if (oif && flags & RT6_LOOKUP_F_IFACE)
return net->ipv6.fib6_null_entry;
- return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
+ return rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
if (!rt || !rt->fib6_nh.fib_nh_has_gw)
return;
- nh_gw = &rt->fib6_nh.nh_gw;
- dev = rt->fib6_nh.nh_dev;
+ nh_gw = &rt->fib6_nh.fib_nh_gw6;
+ dev = rt->fib6_nh.fib_nh_dev;
rcu_read_lock_bh();
idev = __in6_dev_get(dev);
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
*/
static inline int rt6_check_dev(struct fib6_info *rt, int oif)
{
- const struct net_device *dev = rt->fib6_nh.nh_dev;
+ const struct net_device *dev = rt->fib6_nh.fib_nh_dev;
if (!oif || dev->ifindex == oif)
return 2;
return RT6_NUD_SUCCEED;
rcu_read_lock_bh();
- neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev,
- &rt->fib6_nh.nh_gw);
+ neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.fib_nh_dev,
+ &rt->fib6_nh.fib_nh_gw6);
if (neigh) {
read_lock(&neigh->lock);
if (neigh->nud_state & NUD_VALID)
int m;
bool match_do_rr = false;
- if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
+ if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
goto out;
- if (ip6_ignore_linkdown(rt->fib6_nh.nh_dev) &&
- rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
+ if (ip6_ignore_linkdown(rt->fib6_nh.fib_nh_dev) &&
+ rt->fib6_nh.fib_nh_flags & RTNH_F_LINKDOWN &&
!(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
goto out;
/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
/* for copies of local routes, dst->dev needs to be the
rt->dst.input = ip6_forward;
}
- if (ort->fib6_nh.nh_lwtstate) {
- rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
+ if (ort->fib6_nh.fib_nh_lws) {
+ rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.fib_nh_lws);
lwtunnel_set_redirect(&rt->dst);
}
rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
rt->rt6i_flags = ort->fib6_flags;
if (ort->fib6_nh.fib_nh_has_gw) {
- rt->rt6i_gateway = ort->fib6_nh.nh_gw;
+ rt->rt6i_gateway = ort->fib6_nh.fib_nh_gw6;
rt->rt6i_flags |= RTF_GATEWAY;
}
rt6_set_from(rt, ort);
static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
{
unsigned short flags = fib6_info_dst_flags(rt);
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
struct rt6_info *nrt;
if (!fib6_info_hold_safe(rt))
mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
- return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
+ return mtu - lwtunnel_headroom(rt->fib6_nh.fib_nh_lws, mtu);
}
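Reviewer note: lwtunnel_headroom() returns the encapsulation bytes the attached lwtunnel state will prepend (0 when fib_nh_lws is NULL), so the MTU reported here shrinks by exactly the tunnel overhead. Illustrative numbers, not from the patch:

/* device MTU 1500, encap needing 8 bytes of headroom
 *	=> reported path MTU = 1500 - 8 = 1492 */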
static int rt6_insert_exception(struct rt6_info *nrt,
fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
for_each_fib6_node_rt_rcu(fn) {
- if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
+ if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
continue;
if (fib6_check_expired(rt))
continue;
break;
if (!rt->fib6_nh.fib_nh_has_gw)
continue;
- if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
+ if (fl6->flowi6_oif != rt->fib6_nh.fib_nh_dev->ifindex)
continue;
/* rt_cache's gateway might be different from its 'parent'
* in the case of an ip redirect.
* So we keep searching in the exception table if the gateway
* is different.
*/
- if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) {
+ if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.fib_nh_gw6)) {
rt_cache = rt6_find_cached_rt(rt,
&fl6->daddr,
&fl6->saddr);
goto out;
}
- fib6_nh->nh_flags |= RTNH_F_ONLINK;
+ fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
}
if (cfg->fc_encap) {
if (err)
goto out;
- fib6_nh->nh_lwtstate = lwtstate_get(lwtstate);
+ fib6_nh->fib_nh_lws = lwtstate_get(lwtstate);
}
- fib6_nh->nh_weight = 1;
+ fib6_nh->fib_nh_weight = 1;
/* We cannot add true routes via loopback here,
* they would result in kernel looping; promote them to reject routes
if (err)
goto out;
- fib6_nh->nh_gw = cfg->fc_gateway;
+ fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
fib6_nh->fib_nh_has_gw = 1;
}
if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
!netif_carrier_ok(dev))
- fib6_nh->nh_flags |= RTNH_F_LINKDOWN;
+ fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
set_dev:
- fib6_nh->nh_dev = dev;
+ fib6_nh->fib_nh_dev = dev;
err = 0;
out:
if (idev)
in6_dev_put(idev);
if (err) {
- lwtstate_put(fib6_nh->nh_lwtstate);
- fib6_nh->nh_lwtstate = NULL;
+ lwtstate_put(fib6_nh->fib_nh_lws);
+ fib6_nh->fib_nh_lws = NULL;
if (dev)
dev_put(dev);
}
void fib6_nh_release(struct fib6_nh *fib6_nh)
{
- lwtstate_put(fib6_nh->nh_lwtstate);
+ lwtstate_put(fib6_nh->fib_nh_lws);
- if (fib6_nh->nh_dev)
- dev_put(fib6_nh->nh_dev);
+ if (fib6_nh->fib_nh_dev)
+ dev_put(fib6_nh->fib_nh_dev);
}
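Reviewer note: fib6_nh_release() mirrors the error path at the end of fib6_nh_init() above, and the rename leaves the asymmetric NULL handling intact:

/* lwtstate_put() is a no-op on a NULL state, so only fib_nh_dev needs
 * an explicit check before dev_put(); init's error path and release
 * drop the same two references. */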
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
* they would result in kernel looping; promote them to reject routes
*/
addr_type = ipv6_addr_type(&cfg->fc_dst);
- if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh.nh_dev, addr_type))
+ if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh.fib_nh_dev, addr_type))
rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
if (fn) {
for_each_fib6_node_rt_rcu(fn) {
+ struct fib6_nh *nh;
+
if (cfg->fc_flags & RTF_CACHE) {
int rc;
}
continue;
}
+
+ nh = &rt->fib6_nh;
if (cfg->fc_ifindex &&
- (!rt->fib6_nh.nh_dev ||
- rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex))
+ (!nh->fib_nh_dev ||
+ nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
continue;
if (cfg->fc_flags & RTF_GATEWAY &&
- !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
+ !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
continue;
goto out;
for_each_fib6_node_rt_rcu(fn) {
- if (rt->fib6_nh.nh_dev->ifindex != ifindex)
+ if (rt->fib6_nh.fib_nh_dev->ifindex != ifindex)
continue;
if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
!rt->fib6_nh.fib_nh_has_gw)
continue;
- if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
+ if (!ipv6_addr_equal(&rt->fib6_nh.fib_nh_gw6, gwaddr))
continue;
if (!fib6_info_hold_safe(rt))
continue;
rcu_read_lock();
for_each_fib6_node_rt_rcu(&table->tb6_root) {
- if (dev == rt->fib6_nh.nh_dev &&
+ struct fib6_nh *nh = &rt->fib6_nh;
+
+ if (dev == nh->fib_nh_dev &&
((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
- ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
+ ipv6_addr_equal(&nh->fib_nh_gw6, addr))
break;
}
if (rt && !fib6_info_hold_safe(rt))
struct net *net = ((struct arg_dev_net_ip *)arg)->net;
struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
- if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
+ if (((void *)rt->fib6_nh.fib_nh_dev == dev || !dev) &&
rt != net->ipv6.fib6_null_entry &&
ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
spin_lock_bh(&rt6_exception_lock);
if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
rt->fib6_nh.fib_nh_has_gw &&
- ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
+ ipv6_addr_equal(gateway, &rt->fib6_nh.fib_nh_gw6)) {
return -1;
}
static bool rt6_is_dead(const struct fib6_info *rt)
{
- if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
- (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
- ip6_ignore_linkdown(rt->fib6_nh.nh_dev)))
+ if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD ||
+ (rt->fib6_nh.fib_nh_flags & RTNH_F_LINKDOWN &&
+ ip6_ignore_linkdown(rt->fib6_nh.fib_nh_dev)))
return true;
return false;
int total = 0;
if (!rt6_is_dead(rt))
- total += rt->fib6_nh.nh_weight;
+ total += rt->fib6_nh.fib_nh_weight;
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
if (!rt6_is_dead(iter))
- total += iter->fib6_nh.nh_weight;
+ total += iter->fib6_nh.fib_nh_weight;
}
return total;
int upper_bound = -1;
if (!rt6_is_dead(rt)) {
- *weight += rt->fib6_nh.nh_weight;
+ *weight += rt->fib6_nh.fib_nh_weight;
upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
total) - 1;
}
- atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound);
+ atomic_set(&rt->fib6_nh.fib_nh_upper_bound, upper_bound);
}
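Reviewer note: to make the scaling concrete, a worked example with assumed weights (three live siblings of fib_nh_weight 1, 2 and 1, so total = 4); the running *weight is mapped into the 31-bit hash space:

/* illustrative weights, not from the patch:
 *	nh0: *weight = 1 -> bound = (1ULL << 31) * 1 / 4 - 1 = 0x1fffffff
 *	nh1: *weight = 3 -> bound = (1ULL << 31) * 3 / 4 - 1 = 0x5fffffff
 *	nh2: *weight = 4 -> bound = (1ULL << 31) * 4 / 4 - 1 = 0x7fffffff
 * nh1 owns half of the hash space, nh0 and nh2 a quarter each; a dead
 * sibling keeps the initial bound of -1. */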
static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
const struct arg_netdev_event *arg = p_arg;
struct net *net = dev_net(arg->dev);
- if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) {
- rt->fib6_nh.nh_flags &= ~arg->nh_flags;
+ if (rt != net->ipv6.fib6_null_entry &&
+ rt->fib6_nh.fib_nh_dev == arg->dev) {
+ rt->fib6_nh.fib_nh_flags &= ~arg->nh_flags;
fib6_update_sernum_upto_root(net, rt);
rt6_multipath_rebalance(rt);
}
{
struct fib6_info *iter;
- if (rt->fib6_nh.nh_dev == dev)
+ if (rt->fib6_nh.fib_nh_dev == dev)
return true;
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
- if (iter->fib6_nh.nh_dev == dev)
+ if (iter->fib6_nh.fib_nh_dev == dev)
return true;
return false;
struct fib6_info *iter;
unsigned int dead = 0;
- if (rt->fib6_nh.nh_dev == down_dev ||
- rt->fib6_nh.nh_flags & RTNH_F_DEAD)
+ if (rt->fib6_nh.fib_nh_dev == down_dev ||
+ rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
dead++;
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
- if (iter->fib6_nh.nh_dev == down_dev ||
- iter->fib6_nh.nh_flags & RTNH_F_DEAD)
+ if (iter->fib6_nh.fib_nh_dev == down_dev ||
+ iter->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
dead++;
return dead;
{
struct fib6_info *iter;
- if (rt->fib6_nh.nh_dev == dev)
- rt->fib6_nh.nh_flags |= nh_flags;
+ if (rt->fib6_nh.fib_nh_dev == dev)
+ rt->fib6_nh.fib_nh_flags |= nh_flags;
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
- if (iter->fib6_nh.nh_dev == dev)
- iter->fib6_nh.nh_flags |= nh_flags;
+ if (iter->fib6_nh.fib_nh_dev == dev)
+ iter->fib6_nh.fib_nh_flags |= nh_flags;
}
/* called with write lock held for table with rt */
switch (arg->event) {
case NETDEV_UNREGISTER:
- return rt->fib6_nh.nh_dev == dev ? -1 : 0;
+ return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
case NETDEV_DOWN:
if (rt->should_flush)
return -1;
if (!rt->fib6_nsiblings)
- return rt->fib6_nh.nh_dev == dev ? -1 : 0;
+ return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
if (rt6_multipath_uses_dev(rt, dev)) {
unsigned int count;
}
return -2;
case NETDEV_CHANGE:
- if (rt->fib6_nh.nh_dev != dev ||
+ if (rt->fib6_nh.fib_nh_dev != dev ||
rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
break;
- rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
+ rt->fib6_nh.fib_nh_flags |= RTNH_F_LINKDOWN;
rt6_multipath_rebalance(rt);
break;
}
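Reviewer note: the switch above only renames; per-event behavior is unchanged. Paraphrasing the visible code (the NETDEV_DOWN multipath branch is partly trimmed in this excerpt):

/* NETDEV_UNREGISTER: returning -1 asks the fib walker to delete any
 *                    route whose fib_nh_dev is the vanishing device.
 * NETDEV_DOWN:       flush, or count dead multipath members via
 *                    rt6_multipath_dead_count() before deciding.
 * NETDEV_CHANGE:     keep the route, set RTNH_F_LINKDOWN and
 *                    rebalance the sibling weights. */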
Since RFC 1981 doesn't include administrative MTU increase
update PMTU increase is a MUST. (i.e. jumbo frame)
*/
- if (rt->fib6_nh.nh_dev == arg->dev &&
+ if (rt->fib6_nh.fib_nh_dev == arg->dev &&
!fib6_metric_locked(rt, RTAX_MTU)) {
u32 mtu = rt->fib6_pmtu;
goto cleanup;
}
- rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
+ rt->fib6_nh.fib_nh_weight = rtnh->rtnh_hops + 1;
err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
rt, &r_cfg);
nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
+ NLA_ALIGN(sizeof(struct rtnexthop))
+ nla_total_size(16) /* RTA_GATEWAY */
- + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);
+ + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws);
nexthop_len *= rt->fib6_nsiblings;
}
+ nla_total_size(sizeof(struct rta_cacheinfo))
+ nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
+ nla_total_size(1) /* RTA_PREF */
- + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate)
+ + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws)
+ nexthop_len;
}
static int rt6_nexthop_info(struct sk_buff *skb, const struct fib6_nh *fib6_nh,
unsigned int *flags, bool skip_oif)
{
- if (fib6_nh->nh_flags & RTNH_F_DEAD)
+ if (fib6_nh->fib_nh_flags & RTNH_F_DEAD)
*flags |= RTNH_F_DEAD;
- if (fib6_nh->nh_flags & RTNH_F_LINKDOWN) {
+ if (fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
*flags |= RTNH_F_LINKDOWN;
rcu_read_lock();
- if (ip6_ignore_linkdown(fib6_nh->nh_dev))
+ if (ip6_ignore_linkdown(fib6_nh->fib_nh_dev))
*flags |= RTNH_F_DEAD;
rcu_read_unlock();
}
if (fib6_nh->fib_nh_has_gw) {
- if (nla_put_in6_addr(skb, RTA_GATEWAY, &fib6_nh->nh_gw) < 0)
+ if (nla_put_in6_addr(skb, RTA_GATEWAY, &fib6_nh->fib_nh_gw6) < 0)
goto nla_put_failure;
}
- *flags |= (fib6_nh->nh_flags & RTNH_F_ONLINK);
- if (fib6_nh->nh_flags & RTNH_F_OFFLOAD)
+ *flags |= (fib6_nh->fib_nh_flags & RTNH_F_ONLINK);
+ if (fib6_nh->fib_nh_flags & RTNH_F_OFFLOAD)
*flags |= RTNH_F_OFFLOAD;
/* not needed for multipath encoding b/c it has a rtnexthop struct */
- if (!skip_oif && fib6_nh->nh_dev &&
- nla_put_u32(skb, RTA_OIF, fib6_nh->nh_dev->ifindex))
+ if (!skip_oif && fib6_nh->fib_nh_dev &&
+ nla_put_u32(skb, RTA_OIF, fib6_nh->fib_nh_dev->ifindex))
goto nla_put_failure;
- if (fib6_nh->nh_lwtstate &&
- lwtunnel_fill_encap(skb, fib6_nh->nh_lwtstate) < 0)
+ if (fib6_nh->fib_nh_lws &&
+ lwtunnel_fill_encap(skb, fib6_nh->fib_nh_lws) < 0)
goto nla_put_failure;
return 0;
/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, const struct fib6_nh *fib6_nh)
{
- const struct net_device *dev = fib6_nh->nh_dev;
+ const struct net_device *dev = fib6_nh->fib_nh_dev;
struct rtnexthop *rtnh;
unsigned int flags = 0;
if (!rtnh)
goto nla_put_failure;
- rtnh->rtnh_hops = fib6_nh->nh_weight - 1;
+ rtnh->rtnh_hops = fib6_nh->fib_nh_weight - 1;
rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
if (rt6_nexthop_info(skb, fib6_nh, &flags, true) < 0)
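Reviewer note: two hunks in this patch show the off-by-one netlink encoding of nexthop weights: ip6_route_multipath_add() stores fib_nh_weight = rtnh_hops + 1, and rt6_add_nexthop() dumps rtnh_hops = fib_nh_weight - 1, so weights 1..256 fit rtnexthop's 8-bit rtnh_hops field. Round-trip sketch:

static inline unsigned char weight_to_hops(unsigned int weight)
{
	return weight - 1;	/* dump side: rt6_add_nexthop() */
}

static inline unsigned int hops_to_weight(unsigned char hops)
{
	return hops + 1;	/* add side: ip6_route_multipath_add() */
}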
static bool fib6_info_uses_dev(const struct fib6_info *f6i,
const struct net_device *dev)
{
- if (f6i->fib6_nh.nh_dev == dev)
+ if (f6i->fib6_nh.fib_nh_dev == dev)
return true;
if (f6i->fib6_nsiblings) {
list_for_each_entry_safe(sibling, next_sibling,
&f6i->fib6_siblings, fib6_siblings) {
- if (sibling->fib6_nh.nh_dev == dev)
+ if (sibling->fib6_nh.fib_nh_dev == dev)
return true;
}
}
return NOTIFY_OK;
if (event == NETDEV_REGISTER) {
- net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
+ net->ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = dev;
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
/* Registering of the loopback is done before this portion of code,
* the loopback reference in rt6_info will not be taken, do it
* manually for init_net */
- init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
+ init_net.ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES