void fib6_info_destroy(struct rt6_info *f6i)
{
struct rt6_exception_bucket *bucket;
+ struct dst_metrics *m;
WARN_ON(f6i->rt6i_node);
+
+ bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket, 1);
+ if (bucket) {
+ f6i->rt6i_exception_bucket = NULL;
+ kfree(bucket);
+ }
+
+ free_percpu(f6i->rt6i_pcpu);
+
if (f6i->fib6_nh.nh_dev)
dev_put(f6i->fib6_nh.nh_dev);
+ m = f6i->fib6_metrics;
+ if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
+ kfree(m);
+
kfree(f6i);
}
EXPORT_SYMBOL_GPL(fib6_info_destroy);
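
The conversions throughout this patch lean on a hold/release pair for FIB entries. Neither helper is shown in this excerpt; a minimal sketch, assuming the entry keeps its refcount in rt6i_ref as the call sites suggest (upstream may defer the free via RCU):

static inline void fib6_info_hold(struct rt6_info *f6i)
{
	atomic_inc(&f6i->rt6i_ref);
}

static inline void fib6_info_release(struct rt6_info *f6i)
{
	/* tolerate NULL: rt->from may already have been cleared */
	if (f6i && atomic_dec_and_test(&f6i->rt6i_ref))
		fib6_info_destroy(f6i);
}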
/* clean up an intermediate node */
if (!(fn->fn_flags & RTN_RTINFO)) {
RCU_INIT_POINTER(fn->leaf, NULL);
- rt6_release(leaf);
+ fib6_info_release(leaf);
/* remove null_entry in the root node */
} else if (fn->fn_flags & RTN_TL_ROOT &&
rcu_access_pointer(fn->leaf) ==
net->ipv6.fib6_null_entry) {
if (!(fn->fn_flags & RTN_RTINFO) && leaf == rt) {
new_leaf = fib6_find_prefix(net, table, fn);
atomic_inc(&new_leaf->rt6i_ref);
+
rcu_assign_pointer(fn->leaf, new_leaf);
- rt6_release(rt);
+ fib6_info_release(rt);
}
fn = rcu_dereference_protected(fn->parent,
lockdep_is_held(&table->tb6_lock));
}
+
+ if (rt->rt6i_pcpu) {
+ int cpu;
+
+ /* release the reference to this fib entry from
+ * all of its cached pcpu routes
+ */
+ for_each_possible_cpu(cpu) {
+ struct rt6_info **ppcpu_rt;
+ struct rt6_info *pcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(rt->rt6i_pcpu, cpu);
+ pcpu_rt = *ppcpu_rt;
+ if (pcpu_rt) {
+ fib6_info_release(pcpu_rt->from);
+ pcpu_rt->from = NULL;
+ }
+ }
+ }
}
}
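
Each pcpu clone took its own reference on the owning FIB entry through ->from when it was created (see the fib6_info_hold() in rt6_set_from() later in this patch), so the purge loop above only has to drop that reference and clear the back-pointer; the clone's dst stays alive until its own users release it.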
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
fn->rr_ptr = NULL;
- rt6_release(iter);
+ fib6_info_release(iter);
if (nsiblings) {
/* Replacing an ECMP route, remove all siblings */
fib6_purge_rt(iter, fn, info->nl_net);
if (rcu_access_pointer(fn->rr_ptr) == iter)
fn->rr_ptr = NULL;
- rt6_release(iter);
+ fib6_info_release(iter);
nsiblings--;
info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
} else {
int replace_required = 0;
int sernum = fib6_new_sernum(info->nl_net);
- if (WARN_ON_ONCE(!atomic_read(&rt->dst.__refcnt)))
- return -EINVAL;
-
if (info->nlh) {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
allow_create = 0;
if (pn_leaf == rt) {
pn_leaf = NULL;
RCU_INIT_POINTER(pn->leaf, NULL);
- atomic_dec(&rt->rt6i_ref);
+ fib6_info_release(rt);
}
if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
pn_leaf = fib6_find_prefix(info->nl_net, table,
pn);
#if RT6_DEBUG >= 2
if (!pn_leaf) {
WARN_ON(!pn_leaf);
pn_leaf =
info->nl_net->ipv6.fib6_null_entry;
}
#endif
- atomic_inc(&pn_leaf->rt6i_ref);
+ fib6_info_hold(pn_leaf);
rcu_assign_pointer(pn->leaf, pn_leaf);
}
}
(fn->fn_flags & RTN_TL_ROOT &&
!rcu_access_pointer(fn->leaf))))
fib6_repair_tree(info->nl_net, table, fn);
- /* Always release dst as dst->__refcnt is guaranteed
- * to be taken before entering this function
- */
- dst_release_immediate(&rt->dst);
return err;
}
new_fn_leaf = net->ipv6.fib6_null_entry;
}
#endif
- atomic_inc(&new_fn_leaf->rt6i_ref);
+ fib6_info_hold(new_fn_leaf);
rcu_assign_pointer(fn->leaf, new_fn_leaf);
return pn;
}
return pn;
RCU_INIT_POINTER(pn->leaf, NULL);
- rt6_release(pn_leaf);
+ fib6_info_release(pn_leaf);
fn = pn;
}
}
call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, rt, NULL);
if (!info->skip_notify)
inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
- rt6_release(rt);
+ fib6_info_release(rt);
}
/* Need to own table->tb6_lock */
dev = rt->fib6_nh.nh_dev;
seq_printf(seq, " %08x %08x %08x %08x %8s\n",
- rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
- rt->dst.__use, rt->rt6i_flags,
- dev ? dev->name : "");
+ rt->rt6i_metric, atomic_read(&rt->rt6i_ref), 0,
+ rt->rt6i_flags, dev ? dev->name : "");
iter->w.leaf = NULL;
return 0;
}
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
INIT_LIST_HEAD(&rt->rt6i_siblings);
INIT_LIST_HEAD(&rt->rt6i_uncached);
- rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
}
/* allocate dst with ip6_dst_ops */
-static struct rt6_info *__ip6_dst_alloc(struct net *net,
- struct net_device *dev,
- int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+ int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
1, DST_OBSOLETE_FORCE_CHK, flags);
return rt;
}
-
-struct rt6_info *ip6_dst_alloc(struct net *net,
- struct net_device *dev,
- int flags)
-{
- struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
-
- if (rt) {
- rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
- if (!rt->rt6i_pcpu) {
- dst_release_immediate(&rt->dst);
- return NULL;
- }
- }
-
- return rt;
-}
EXPORT_SYMBOL(ip6_dst_alloc);
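
With the per-cpu cache and default-metrics setup gone from the dst side, that initialization presumably moves to the FIB entry allocator. fib6_info_alloc() is called later in this patch but never shown; a hypothetical sketch consistent with those call sites:

struct rt6_info *fib6_info_alloc(gfp_t gfp_flags)
{
	struct rt6_info *f6i;

	f6i = kzalloc(sizeof(*f6i), gfp_flags);
	if (!f6i)
		return NULL;

	/* the pcpu cache now hangs off the fib entry, not the dst */
	f6i->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
	if (!f6i->rt6i_pcpu) {
		kfree(f6i);
		return NULL;
	}

	INIT_LIST_HEAD(&f6i->rt6i_siblings);
	f6i->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
	atomic_set(&f6i->rt6i_ref, 1);

	return f6i;
}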
static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
- struct rt6_exception_bucket *bucket;
struct rt6_info *from = rt->from;
struct inet6_dev *idev;
- struct dst_metrics *m;
dst_destroy_metrics_generic(dst);
- free_percpu(rt->rt6i_pcpu);
rt6_uncached_list_del(rt);
idev = rt->rt6i_idev;
rt->rt6i_idev = NULL;
in6_dev_put(idev);
}
- bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
- if (bucket) {
- rt->rt6i_exception_bucket = NULL;
- kfree(bucket);
- }
-
- m = rt->fib6_metrics;
- if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
- kfree(m);
rt->from = NULL;
- dst_release(&from->dst);
+ fib6_info_release(from);
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
else
fib6_set_expires(rt, jiffies + HZ * lifetime);
- ip6_rt_put(rt);
+ fib6_info_release(rt);
}
return 0;
}
static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
- BUG_ON(from->from);
-
rt->rt6i_flags &= ~RTF_EXPIRES;
- if (dst_hold_safe(&from->dst))
- rt->from = from;
+ fib6_info_hold(from);
+ rt->from = from;
dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
if (from->fib6_metrics != &dst_default_metrics) {
rt->dst._metrics |= DST_METRICS_REFCOUNTED;
refcount_inc(&from->fib6_metrics->refcnt);
}
}
struct net_device *dev = rt->fib6_nh.nh_dev;
struct rt6_info *nrt;
- nrt = __ip6_dst_alloc(dev_net(dev), dev, flags);
+ nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
if (nrt)
ip6_rt_copy_init(nrt, rt);
{
struct nl_info info = { .nl_net = net, };
- /* Hold dst to account for the reference from the fib6 tree */
- dst_hold(&rt->dst);
return __ip6_ins_rt(rt, &info, NULL);
}
rcu_read_lock();
dev = ip6_rt_get_dev_rcu(ort);
- rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
+ rt = ip6_dst_alloc(dev_net(dev), dev, 0);
rcu_read_unlock();
if (!rt)
return NULL;
rcu_read_lock();
dev = ip6_rt_get_dev_rcu(rt);
- pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, flags);
+ pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
rcu_read_unlock();
if (!pcpu_rt)
return NULL;
net = dev_net(rt6_ex->rt6i->dst.dev);
- rt6_ex->rt6i->rt6i_node = NULL;
hlist_del_rcu(&rt6_ex->hlist);
- rt6_release(rt6_ex->rt6i);
+ ip6_rt_put(rt6_ex->rt6i);
kfree_rcu(rt6_ex, rcu);
WARN_ON_ONCE(!bucket->depth);
bucket->depth--;
struct rt6_info *uncached_rt;
- if (ip6_hold_safe(net, &f6i, true)) {
- dst_use_noref(&f6i->dst, jiffies);
- } else {
- rcu_read_unlock();
- uncached_rt = f6i;
- goto uncached_rt_out;
- }
+ fib6_info_hold(f6i);
rcu_read_unlock();
uncached_rt = ip6_rt_cache_alloc(f6i, &fl6->daddr, NULL);
- dst_release(&rt->dst);
+ fib6_info_release(f6i);
if (uncached_rt) {
/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
 * No need for another dst_hold()
 */
rt6_uncached_list_add(uncached_rt);
} else {
uncached_rt = net->ipv6.ip6_null_entry;
dst_hold(&uncached_rt->dst);
}
-uncached_rt_out:
trace_fib6_table_lookup(net, uncached_rt, table, fl6);
return uncached_rt;
struct rt6_info *pcpu_rt;
- dst_use_noref(&f6i->dst, jiffies);
local_bh_disable();
pcpu_rt = rt6_get_pcpu_route(f6i);
- if (!pcpu_rt) {
- /* atomic_inc_not_zero() is needed when using rcu */
- if (atomic_inc_not_zero(&f6i->rt6i_ref)) {
- /* No dst_hold() on rt is needed because grabbing
- * rt->rt6i_ref makes sure rt can't be released.
- */
- pcpu_rt = rt6_make_pcpu_route(net, f6i);
- rt6_release(f6i);
- } else {
- /* rt is already removed from tree */
- pcpu_rt = net->ipv6.ip6_null_entry;
- dst_hold(&pcpu_rt->dst);
- }
- }
+ if (!pcpu_rt)
+ pcpu_rt = rt6_make_pcpu_route(net, f6i);
+
local_bh_enable();
rcu_read_unlock();
trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
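
The atomic_inc_not_zero() dance is gone because the lookup now runs entirely under rcu_read_lock() and the clone pins the entry itself via rt6_set_from(). A sketch of what rt6_make_pcpu_route() is assumed to do, with ip6_rt_pcpu_alloc() as a hypothetical creation helper not shown in this excerpt:

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    struct rt6_info *f6i)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(f6i);	/* calls rt6_set_from() */
	if (!pcpu_rt) {
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(f6i->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	return pcpu_rt;
}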
* Destination cache support functions
*/
+static bool fib6_check(struct rt6_info *f6i, u32 cookie)
+{
+ u32 rt_cookie = 0;
+
+ if ((f6i && !rt6_get_cookie_safe(f6i, &rt_cookie)) ||
+ rt_cookie != cookie)
+ return false;
+
+ if (fib6_check_expired(f6i))
+ return false;
+
+ return true;
+}
+
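fib6_check() validates a FIB entry directly against its node's cookie; rt6_check() below keeps serving dst-based routes by forwarding to the owning entry through rt->from, and the DST_OBSOLETE_FORCE_CHK path further down leans on fib6_check() for cached clones.
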
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
u32 rt_cookie = 0;
- if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
+ if ((rt->from && !rt6_get_cookie_safe(rt->from, &rt_cookie)) ||
+ rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
{
if (!__rt6_check_expired(rt) &&
rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
- rt6_check(rt->from, cookie))
+ fib6_check(rt->from, cookie))
return &rt->dst;
else
return NULL;
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
if (rt6_check_expired(rt)) {
- ip6_del_rt(dev_net(dst->dev), rt);
+ rt6_remove_exception_rt(rt);
dst = NULL;
}
} else {
if (rt) {
if (rt->rt6i_flags & RTF_CACHE) {
if (dst_hold_safe(&rt->dst))
- ip6_del_rt(dev_net(rt->dst.dev), rt);
- } else {
+ rt6_remove_exception_rt(rt);
+ } else if (rt->from) {
struct fib6_node *fn;
rcu_read_lock();
- fn = rcu_dereference(rt->rt6i_node);
+ fn = rcu_dereference(rt->from->rt6i_node);
if (fn && (rt->rt6i_flags & RTF_DEFAULT))
fn->fn_sernum = -1;
rcu_read_unlock();
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL,
- (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
-
- if (!rt) {
- err = -ENOMEM;
+ err = -ENOMEM;
+ rt = fib6_info_alloc(gfp_flags);
+ if (!rt)
goto out;
- }
+
+ if (cfg->fc_flags & RTF_ADDRCONF)
+ rt->dst_nocount = true;
err = ip6_convert_metrics(net, rt, cfg);
if (err < 0)
if (err)
goto out;
- rt->fib6_nh.nh_gw = rt->rt6i_gateway = cfg->fc_gateway;
+ rt->fib6_nh.nh_gw = cfg->fc_gateway;
}
err = -ENODEV;
!netif_carrier_ok(dev))
rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
- rt->fib6_nh.nh_dev = rt->dst.dev = dev;
+ rt->fib6_nh.nh_dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_table = table;
dev_put(dev);
if (idev)
in6_dev_put(idev);
- if (rt)
- dst_release_immediate(&rt->dst);
+ fib6_info_release(rt);
return ERR_PTR(err);
}
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+ fib6_info_release(rt);
return err;
}
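
The "Hold dst to account for the reference from the fib6 tree" comment removed earlier is effectively replaced by this pattern: the insert path presumably takes the tree's own hold on the entry, so callers drop their creation reference right after __ip6_ins_rt() regardless of the outcome.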
spin_unlock_bh(&table->tb6_lock);
out:
- ip6_rt_put(rt);
+ fib6_info_release(rt);
return err;
}
out_unlock:
spin_unlock_bh(&table->tb6_lock);
out_put:
- ip6_rt_put(rt);
+ fib6_info_release(rt);
if (skb) {
rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
continue;
if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
continue;
- if (!dst_hold_safe(&rt->dst))
- break;
+ fib6_info_hold(rt);
rcu_read_unlock();
/* if gateway was specified only delete the one hop */
for_each_fib6_node_rt_rcu(&table->tb6_root) {
if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
(!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
- if (dst_hold_safe(&rt->dst)) {
- rcu_read_unlock();
- ip6_del_rt(net, rt);
- } else {
- rcu_read_unlock();
- }
+ fib6_info_hold(rt);
+ rcu_read_unlock();
+ ip6_del_rt(net, rt);
goto restart;
}
}
struct net_device *dev = idev->dev;
struct rt6_info *rt;
- rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
+ rt = fib6_info_alloc(gfp_flags);
if (!rt)
return ERR_PTR(-ENOMEM);
}
rt->fib6_nh.nh_gw = *addr;
+ dev_hold(dev);
rt->fib6_nh.nh_dev = dev;
- rt->rt6i_gateway = *addr;
rt->rt6i_dst.addr = *addr;
rt->rt6i_dst.plen = 128;
tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
rt, &r_cfg);
if (err) {
- dst_release_immediate(&rt->dst);
+ fib6_info_release(rt);
goto cleanup;
}
list_for_each_entry(nh, &rt6_nh_list, next) {
rt_last = nh->rt6_info;
err = __ip6_ins_rt(nh->rt6_info, info, extack);
+ fib6_info_release(nh->rt6_info);
+
/* save reference to first route for notification */
if (!rt_notif && !err)
rt_notif = nh->rt6_info;

/* nh->rt6_info is used or freed at this point, reset to NULL
 * so the cleanup loop below does not release it a second time
 */
nh->rt6_info = NULL;
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
if (nh->rt6_info)
- dst_release_immediate(&nh->rt6_info->dst);
+ fib6_info_release(nh->rt6_info);
list_del(&nh->next);
kfree(nh);
}
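
The multipath path follows the same reference flow: each rt6_nh_list entry carries exactly the creation reference, dropped right after its __ip6_ins_rt() above or here in cleanup if the entry was never inserted. rt_notif and rt_last remain safe to use for the notification because successfully inserted entries stay pinned by the tree's own reference.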
goto errout;
}
- if (fibmatch && rt->from) {
- struct rt6_info *ort = rt->from;
-
- dst_hold(&ort->dst);
- ip6_rt_put(rt);
- rt = ort;
- }
-
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
ip6_rt_put(rt);
skb_dst_set(skb, &rt->dst);
if (fibmatch)
- err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, iif,
+ err = rt6_fill_node(net, skb, rt->from, NULL, NULL, NULL, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0);
else
- err = rt6_fill_node(net, skb, rt, dst, &fl6.daddr, &fl6.saddr,
- iif, RTM_NEWROUTE,
+ err = rt6_fill_node(net, skb, rt->from, dst,
+ &fl6.daddr, &fl6.saddr, iif, RTM_NEWROUTE,
NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
0);
if (err < 0) {