memset(&fl, 0, sizeof fl);
ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
- fl.oif = addr->bound_dev_if;
+ fl.flowi_oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl);
if ((ret = dst->error))
memset(&fl, 0, sizeof(fl));
ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
- fl.oif = dst_addr->sin6_scope_id;
+ fl.flowi_oif = dst_addr->sin6_scope_id;
*dst = ip6_route_output(&init_net, NULL, &fl);
if (*dst)
static inline bool dn_is_input_route(struct dn_route *rt)
{
- return rt->fl.iif != 0;
+ return rt->fl.flowi_iif != 0;
}
static inline bool dn_is_output_route(struct dn_route *rt)
{
- return rt->fl.iif == 0;
+ return rt->fl.flowi_iif == 0;
}
extern void dn_route_init(void);
#include <asm/atomic.h>
struct flowi {
- int oif;
- int iif;
- __u32 mark;
- __u8 tos;
- __u8 scope;
- __u8 proto;
- __u8 flags;
+ int flowi_oif;
+ int flowi_iif;
+ __u32 flowi_mark;
+ __u8 flowi_tos;
+ __u8 flowi_scope;
+ __u8 flowi_proto;
+ __u8 flowi_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_PRECOW_METRICS 0x02
#define FLOWI_FLAG_CAN_SLEEP 0x04
- __u32 secid;
+ __u32 flowi_secid;
union {
struct {
#define fl6_flowlabel nl_u.ip6_u.flowlabel
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr
-#define fl4_tos tos
-#define fl4_scope scope
+#define fl4_tos flowi_tos
+#define fl4_scope flowi_scope
union {
struct {
static inline int flow_cache_uli_match(const struct flowi *fl1,
const struct flowi *fl2)
{
- return (fl1->proto == fl2->proto &&
+ return (fl1->flowi_proto == fl2->flowi_proto &&
!memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
}
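Note: only the protocol-independent members of struct flowi gain the flowi_ prefix; per-family accessors such as fl4_dst, fl4_src and fl4_tos keep their names because the #define aliases above now expand to the renamed fields. A minimal sketch of what a converted IPv4 caller looks like (illustrative only, pieced together from the call sites changed below; daddr/saddr/sk stand in for whatever the caller already has):

	struct flowi fl = {
		.flowi_oif   = sk->sk_bound_dev_if,     /* was .oif */
		.flowi_mark  = sk->sk_mark,             /* was .mark */
		.flowi_proto = sk->sk_protocol,         /* was .proto */
		.flowi_flags = inet_sk_flowi_flags(sk), /* was .flags */
		.fl4_dst     = daddr,                   /* alias, unchanged */
		.fl4_src     = saddr,                   /* alias, unchanged */
		.fl4_tos     = RT_CONN_FLAGS(sk),       /* alias, unchanged */
	};
	struct rtable *rt = ip_route_output_key(sock_net(sk), &fl);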
__be32 saddr, u8 tos, int oif)
{
struct flowi fl = {
- .oif = oif,
+ .flowi_oif = oif,
.fl4_dst = daddr,
.fl4_src = saddr,
.fl4_tos = tos,
__u8 proto, __u8 tos, int oif)
{
struct flowi fl = {
- .oif = oif,
- .flags = sk ? inet_sk_flowi_flags(sk) : 0,
- .mark = sk ? sk->sk_mark : 0,
+ .flowi_oif = oif,
+ .flowi_flags = sk ? inet_sk_flowi_flags(sk) : 0,
+ .flowi_mark = sk ? sk->sk_mark : 0,
.fl4_dst = daddr,
.fl4_src = saddr,
.fl4_tos = tos,
- .proto = proto,
+ .flowi_proto = proto,
.fl_ip_dport = dport,
.fl_ip_sport = sport,
};
__be32 gre_key, __u8 tos, int oif)
{
struct flowi fl = {
- .oif = oif,
+ .flowi_oif = oif,
.fl4_dst = daddr,
.fl4_src = saddr,
.fl4_tos = tos,
- .proto = IPPROTO_GRE,
+ .flowi_proto = IPPROTO_GRE,
.fl_gre_key = gre_key,
};
return ip_route_output_key(net, &fl);
__be16 sport, __be16 dport,
struct sock *sk, bool can_sleep)
{
- struct flowi fl = { .oif = oif,
- .mark = sk->sk_mark,
+ struct flowi fl = { .flowi_oif = oif,
+ .flowi_mark = sk->sk_mark,
.fl4_dst = dst,
.fl4_src = src,
.fl4_tos = tos,
- .proto = protocol,
+ .flowi_proto = protocol,
.fl_ip_sport = sport,
.fl_ip_dport = dport };
struct net *net = sock_net(sk);
struct rtable *rt;
if (inet_sk(sk)->transparent)
- fl.flags |= FLOWI_FLAG_ANYSRC;
+ fl.flowi_flags |= FLOWI_FLAG_ANYSRC;
if (protocol == IPPROTO_TCP)
- fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
+ fl.flowi_flags |= FLOWI_FLAG_PRECOW_METRICS;
if (can_sleep)
- fl.flags |= FLOWI_FLAG_CAN_SLEEP;
+ fl.flowi_flags |= FLOWI_FLAG_CAN_SLEEP;
if (!dst || !src) {
rt = __ip_route_output_key(net, &fl);
__be16 dport, struct sock *sk)
{
if (sport != orig_sport || dport != orig_dport) {
- struct flowi fl = { .oif = rt->rt_oif,
- .mark = rt->rt_mark,
+ struct flowi fl = { .flowi_oif = rt->rt_oif,
+ .flowi_mark = rt->rt_mark,
.fl4_dst = rt->rt_key_dst,
.fl4_src = rt->rt_key_src,
.fl4_tos = rt->rt_tos,
- .proto = protocol,
+ .flowi_proto = protocol,
.fl_ip_sport = sport,
.fl_ip_dport = dport };
if (inet_sk(sk)->transparent)
- fl.flags |= FLOWI_FLAG_ANYSRC;
+ fl.flowi_flags |= FLOWI_FLAG_ANYSRC;
if (protocol == IPPROTO_TCP)
- fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
+ fl.flowi_flags |= FLOWI_FLAG_PRECOW_METRICS;
ip_rt_put(rt);
security_sk_classify_flow(sk, &fl);
return ip_route_output_flow(sock_net(sk), &fl, sk);
__be16 xfrm_flowi_sport(const struct flowi *fl)
{
__be16 port;
- switch(fl->proto) {
+ switch(fl->flowi_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
__be16 xfrm_flowi_dport(const struct flowi *fl)
{
__be16 port;
- switch(fl->proto) {
+ switch(fl->flowi_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
{
int ret = 0;
- if (rule->iifindex && (rule->iifindex != fl->iif))
+ if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
goto out;
- if (rule->oifindex && (rule->oifindex != fl->oif))
+ if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
goto out;
- if ((rule->mark ^ fl->mark) & rule->mark_mask)
+ if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
goto out;
ret = ops->match(rule, fl, flags);
struct sk_buff *skb)
{
struct rtable *rt;
- struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
- .fl4_dst = ip_hdr(skb)->saddr,
- .fl4_src = ip_hdr(skb)->daddr,
- .fl4_tos = RT_CONN_FLAGS(sk),
- .proto = sk->sk_protocol,
- .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
- .fl_ip_dport = dccp_hdr(skb)->dccph_sport
- };
+ struct flowi fl = {
+ .flowi_oif = skb_rtable(skb)->rt_iif,
+ .fl4_dst = ip_hdr(skb)->saddr,
+ .fl4_src = ip_hdr(skb)->daddr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
+ .flowi_proto = sk->sk_protocol,
+ .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
+ .fl_ip_dport = dccp_hdr(skb)->dccph_sport,
+ };
security_skb_classify_flow(skb, &fl);
rt = ip_route_output_flow(net, &fl, sk);
for now.
*/
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_DCCP;
+ fl.flowi_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
+ fl.flowi_oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet->inet_dport;
fl.fl_ip_sport = inet->inet_sport;
security_sk_classify_flow(sk, &fl);
struct dst_entry *dst;
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_DCCP;
+ fl.flowi_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
fl.fl6_flowlabel = 0;
- fl.oif = ireq6->iif;
+ fl.flowi_oif = ireq6->iif;
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, &fl);
ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);
- fl.proto = IPPROTO_DCCP;
- fl.oif = inet6_iif(rxskb);
+ fl.flowi_proto = IPPROTO_DCCP;
+ fl.flowi_oif = inet6_iif(rxskb);
fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport;
fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport;
security_skb_classify_flow(rxskb, &fl);
struct flowi fl;
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_DCCP;
+ fl.flowi_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl, opt, &final);
ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
- fl.oif = sk->sk_bound_dev_if;
+ fl.flowi_oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, &fl);
if (!ipv6_addr_any(&np->rcv_saddr))
saddr = &np->rcv_saddr;
- fl.proto = IPPROTO_DCCP;
+ fl.flowi_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
+ fl.flowi_oif = sk->sk_bound_dev_if;
fl.fl_ip_dport = usin->sin6_port;
fl.fl_ip_sport = inet->inet_sport;
security_sk_classify_flow(sk, &fl);
err = -EHOSTUNREACH;
memset(&fl, 0, sizeof(fl));
- fl.oif = sk->sk_bound_dev_if;
+ fl.flowi_oif = sk->sk_bound_dev_if;
fl.fld_dst = dn_saddr2dn(&scp->peer);
fl.fld_src = dn_saddr2dn(&scp->addr);
dn_sk_ports_copy(&fl, scp);
- fl.proto = DNPROTO_NSP;
+ fl.flowi_proto = DNPROTO_NSP;
if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
goto out;
sk->sk_route_caps = sk->sk_dst_cache->dev->features;
memset(&fl, 0, sizeof(fl));
fl.fld_dst = nh->nh_gw;
- fl.oif = nh->nh_oif;
+ fl.flowi_oif = nh->nh_oif;
fl.fld_scope = r->rtm_scope + 1;
if (fl.fld_scope < RT_SCOPE_LINK)
for_nexthops(fi) {
if (nh->nh_flags & RTNH_F_DEAD)
continue;
- if (!fl->oif || fl->oif == nh->nh_oif)
+ if (!fl->flowi_oif || fl->flowi_oif == nh->nh_oif)
break;
}
if (nhsel < fi->fib_nhs) {
}
memset(&fl, 0, sizeof(fl));
- fl.oif = sk->sk_bound_dev_if;
+ fl.flowi_oif = sk->sk_bound_dev_if;
fl.fld_src = dn_saddr2dn(&scp->addr);
fl.fld_dst = dn_saddr2dn(&scp->peer);
dn_sk_ports_copy(&fl, scp);
- fl.proto = DNPROTO_NSP;
+ fl.flowi_proto = DNPROTO_NSP;
if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, 0) == 0) {
dst = sk_dst_get(sk);
sk->sk_route_caps = dst->dev->features;
{
return ((fl1->fld_dst ^ fl2->fld_dst) |
(fl1->fld_src ^ fl2->fld_src) |
- (fl1->mark ^ fl2->mark) |
+ (fl1->flowi_mark ^ fl2->flowi_mark) |
(fl1->fld_scope ^ fl2->fld_scope) |
- (fl1->oif ^ fl2->oif) |
- (fl1->iif ^ fl2->iif)) == 0;
+ (fl1->flowi_oif ^ fl2->flowi_oif) |
+ (fl1->flowi_iif ^ fl2->flowi_iif)) == 0;
}
static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
- struct flowi fl = { .fld_dst = oldflp->fld_dst,
- .fld_src = oldflp->fld_src,
- .fld_scope = RT_SCOPE_UNIVERSE,
- .mark = oldflp->mark,
- .iif = init_net.loopback_dev->ifindex,
- .oif = oldflp->oif };
+ struct flowi fl = {
+ .fld_dst = oldflp->fld_dst,
+ .fld_src = oldflp->fld_src,
+ .fld_scope = RT_SCOPE_UNIVERSE,
+ .flowi_mark = oldflp->flowi_mark,
+ .flowi_iif = init_net.loopback_dev->ifindex,
+ .flowi_oif = oldflp->flowi_oif,
+ };
struct dn_route *rt = NULL;
struct net_device *dev_out = NULL, *dev;
struct neighbour *neigh = NULL;
"dn_route_output_slow: dst=%04x src=%04x mark=%d"
" iif=%d oif=%d\n", le16_to_cpu(oldflp->fld_dst),
le16_to_cpu(oldflp->fld_src),
- oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif);
+ oldflp->flowi_mark, init_net.loopback_dev->ifindex, oldflp->flowi_oif);
/* If we have an output interface, verify its a DECnet device */
- if (oldflp->oif) {
- dev_out = dev_get_by_index(&init_net, oldflp->oif);
+ if (oldflp->flowi_oif) {
+ dev_out = dev_get_by_index(&init_net, oldflp->flowi_oif);
err = -ENODEV;
if (dev_out && dev_out->dn_ptr == NULL) {
dev_put(dev_out);
if (!fl.fld_dst)
goto out;
}
- fl.oif = init_net.loopback_dev->ifindex;
+ fl.flowi_oif = init_net.loopback_dev->ifindex;
res.type = RTN_LOCAL;
goto make_route;
}
"dn_route_output_slow: initial checks complete."
" dst=%o4x src=%04x oif=%d try_hard=%d\n",
le16_to_cpu(fl.fld_dst), le16_to_cpu(fl.fld_src),
- fl.oif, try_hard);
+ fl.flowi_oif, try_hard);
/*
* N.B. If the kernel is compiled without router support then
if (!try_hard) {
neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst);
if (neigh) {
- if ((oldflp->oif &&
- (neigh->dev->ifindex != oldflp->oif)) ||
+ if ((oldflp->flowi_oif &&
+ (neigh->dev->ifindex != oldflp->flowi_oif)) ||
(oldflp->fld_src &&
(!dn_dev_islocal(neigh->dev,
oldflp->fld_src)))) {
if (fl.fld_src == 0 && res.type != RTN_LOCAL)
goto e_addr;
}
- fl.oif = dev_out->ifindex;
+ fl.flowi_oif = dev_out->ifindex;
goto make_route;
}
free_res = 1;
dev_put(dev_out);
dev_out = init_net.loopback_dev;
dev_hold(dev_out);
- fl.oif = dev_out->ifindex;
+ fl.flowi_oif = dev_out->ifindex;
if (res.fi)
dn_fib_info_put(res.fi);
res.fi = NULL;
goto make_route;
}
- if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ if (res.fi->fib_nhs > 1 && fl.flowi_oif == 0)
dn_fib_select_multipath(&fl, &res);
/*
dev_put(dev_out);
dev_out = DN_FIB_RES_DEV(res);
dev_hold(dev_out);
- fl.oif = dev_out->ifindex;
+ fl.flowi_oif = dev_out->ifindex;
gateway = DN_FIB_RES_GW(res);
make_route:
rt->fl.fld_src = oldflp->fld_src;
rt->fl.fld_dst = oldflp->fld_dst;
- rt->fl.oif = oldflp->oif;
- rt->fl.iif = 0;
- rt->fl.mark = oldflp->mark;
+ rt->fl.flowi_oif = oldflp->flowi_oif;
+ rt->fl.flowi_iif = 0;
+ rt->fl.flowi_mark = oldflp->flowi_mark;
rt->rt_saddr = fl.fld_src;
rt->rt_daddr = fl.fld_dst;
rt = rcu_dereference_bh(rt->dst.dn_next)) {
if ((flp->fld_dst == rt->fl.fld_dst) &&
(flp->fld_src == rt->fl.fld_src) &&
- (flp->mark == rt->fl.mark) &&
+ (flp->flowi_mark == rt->fl.flowi_mark) &&
dn_is_output_route(rt) &&
- (rt->fl.oif == flp->oif)) {
+ (rt->fl.flowi_oif == flp->flowi_oif)) {
dst_use(&rt->dst, jiffies);
rcu_read_unlock_bh();
*pprt = &rt->dst;
int err;
err = __dn_route_output_key(pprt, flp, flags);
- if (err == 0 && flp->proto) {
+ if (err == 0 && flp->flowi_proto) {
*pprt = xfrm_lookup(&init_net, *pprt, flp, NULL, 0);
if (IS_ERR(*pprt)) {
err = PTR_ERR(*pprt);
int err;
err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
- if (err == 0 && fl->proto) {
+ if (err == 0 && fl->flowi_proto) {
if (!(flags & MSG_DONTWAIT))
- fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+ fl->flowi_flags |= FLOWI_FLAG_CAN_SLEEP;
*pprt = xfrm_lookup(&init_net, *pprt, fl, sk, 0);
if (IS_ERR(*pprt)) {
err = PTR_ERR(*pprt);
int flags = 0;
__le16 gateway = 0;
__le16 local_src = 0;
- struct flowi fl = { .fld_dst = cb->dst,
- .fld_src = cb->src,
- .fld_scope = RT_SCOPE_UNIVERSE,
- .mark = skb->mark,
- .iif = skb->dev->ifindex };
+ struct flowi fl = {
+ .fld_dst = cb->dst,
+ .fld_src = cb->src,
+ .fld_scope = RT_SCOPE_UNIVERSE,
+ .flowi_mark = skb->mark,
+ .flowi_iif = skb->dev->ifindex,
+ };
struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
int err = -EINVAL;
int free_res = 0;
if (dn_db->parms.forwarding == 0)
goto e_inval;
- if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ if (res.fi->fib_nhs > 1 && fl.flowi_oif == 0)
dn_fib_select_multipath(&fl, &res);
/*
rt->fl.fld_src = cb->src;
rt->fl.fld_dst = cb->dst;
- rt->fl.oif = 0;
- rt->fl.iif = in_dev->ifindex;
- rt->fl.mark = fl.mark;
+ rt->fl.flowi_oif = 0;
+ rt->fl.flowi_iif = in_dev->ifindex;
+ rt->fl.flowi_mark = fl.flowi_mark;
rt->dst.flags = DST_HOST;
rt->dst.neighbour = neigh;
rt = rcu_dereference(rt->dst.dn_next)) {
if ((rt->fl.fld_src == cb->src) &&
(rt->fl.fld_dst == cb->dst) &&
- (rt->fl.oif == 0) &&
- (rt->fl.mark == skb->mark) &&
- (rt->fl.iif == cb->iif)) {
+ (rt->fl.flowi_oif == 0) &&
+ (rt->fl.flowi_mark == skb->mark) &&
+ (rt->fl.flowi_iif == cb->iif)) {
dst_use(&rt->dst, jiffies);
rcu_read_unlock();
skb_dst_set(skb, (struct dst_entry *)rt);
rt->dst.error) < 0)
goto rtattr_failure;
if (dn_is_input_route(rt))
- RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
+ RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.flowi_iif);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
return -EINVAL;
memset(&fl, 0, sizeof(fl));
- fl.proto = DNPROTO_NSP;
+ fl.flowi_proto = DNPROTO_NSP;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb == NULL)
if (rta[RTA_DST-1])
memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
if (rta[RTA_IIF-1])
- memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
+ memcpy(&fl.flowi_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
- if (fl.iif) {
+ if (fl.flowi_iif) {
struct net_device *dev;
- if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
+ if ((dev = dev_get_by_index(&init_net, fl.flowi_iif)) == NULL) {
kfree_skb(skb);
return -ENODEV;
}
int oif = 0;
if (rta[RTA_OIF - 1])
memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
- fl.oif = oif;
+ fl.flowi_oif = oif;
err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
}
int ret;
struct net *net;
- fl.oif = 0;
- fl.iif = oif;
- fl.mark = mark;
+ fl.flowi_oif = 0;
+ fl.flowi_iif = oif;
+ fl.flowi_mark = mark;
fl.fl4_dst = src;
fl.fl4_src = dst;
fl.fl4_tos = tos;
rpf = IN_DEV_RPFILTER(in_dev);
accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
if (mark && !IN_DEV_SRC_VMARK(in_dev))
- fl.mark = 0;
+ fl.flowi_mark = 0;
}
if (in_dev == NULL)
goto last_resort;
if (rpf == 1)
goto e_rpf;
- fl.oif = dev->ifindex;
+ fl.flowi_oif = dev->ifindex;
ret = 0;
if (fib_lookup(net, &fl, &res) == 0) {
struct fib_result res;
struct flowi fl = {
- .mark = frn->fl_mark,
+ .flowi_mark = frn->fl_mark,
.fl4_dst = frn->fl_addr,
.fl4_tos = frn->fl_tos,
.fl4_scope = frn->fl_scope,
struct flowi fl = {
.fl4_dst = nh->nh_gw,
.fl4_scope = cfg->fc_scope + 1,
- .oif = nh->nh_oif,
+ .flowi_oif = nh->nh_oif,
};
/* It is not necessary, but requires a bit of thinking */
if (nh->nh_flags & RTNH_F_DEAD)
continue;
- if (flp->oif && flp->oif != nh->nh_oif)
+ if (flp->flowi_oif && flp->flowi_oif != nh->nh_oif)
continue;
#ifdef CONFIG_IP_FIB_TRIE_STATS
daddr = icmp_param->replyopts.faddr;
}
{
- struct flowi fl = { .fl4_dst= daddr,
- .fl4_src = rt->rt_spec_dst,
- .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
- .proto = IPPROTO_ICMP };
+ struct flowi fl = {
+ .fl4_dst = daddr,
+ .fl4_src = rt->rt_spec_dst,
+ .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .flowi_proto = IPPROTO_ICMP,
+ };
security_skb_classify_flow(skb, &fl);
rt = ip_route_output_key(net, &fl);
if (IS_ERR(rt))
param->replyopts.faddr : iph->saddr),
.fl4_src = saddr,
.fl4_tos = RT_TOS(tos),
- .proto = IPPROTO_ICMP,
+ .flowi_proto = IPPROTO_ICMP,
.fl_icmp_type = type,
.fl_icmp_code = code,
};
struct rtable *rt;
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options *opt = inet_rsk(req)->opt;
- struct flowi fl = { .oif = sk->sk_bound_dev_if,
- .mark = sk->sk_mark,
- .fl4_dst = ((opt && opt->srr) ?
- opt->faddr : ireq->rmt_addr),
- .fl4_src = ireq->loc_addr,
- .fl4_tos = RT_CONN_FLAGS(sk),
- .proto = sk->sk_protocol,
- .flags = inet_sk_flowi_flags(sk),
- .fl_ip_sport = inet_sk(sk)->inet_sport,
- .fl_ip_dport = ireq->rmt_port };
+ struct flowi fl = {
+ .flowi_oif = sk->sk_bound_dev_if,
+ .flowi_mark = sk->sk_mark,
+ .fl4_dst = ((opt && opt->srr) ?
+ opt->faddr : ireq->rmt_addr),
+ .fl4_src = ireq->loc_addr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
+ .flowi_proto = sk->sk_protocol,
+ .flowi_flags = inet_sk_flowi_flags(sk),
+ .fl_ip_sport = inet_sk(sk)->inet_sport,
+ .fl_ip_dport = ireq->rmt_port,
+ };
struct net *net = sock_net(sk);
security_req_classify_flow(req, &fl);
}
{
- struct flowi fl = { .oif = arg->bound_dev_if,
- .fl4_dst = daddr,
- .fl4_src = rt->rt_spec_dst,
- .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
- .fl_ip_sport = tcp_hdr(skb)->dest,
- .fl_ip_dport = tcp_hdr(skb)->source,
- .proto = sk->sk_protocol,
- .flags = ip_reply_arg_flowi_flags(arg) };
+ struct flowi fl = {
+ .flowi_oif = arg->bound_dev_if,
+ .fl4_dst = daddr,
+ .fl4_src = rt->rt_spec_dst,
+ .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
+ .fl_ip_sport = tcp_hdr(skb)->dest,
+ .fl_ip_dport = tcp_hdr(skb)->source,
+ .flowi_proto = sk->sk_protocol,
+ .flowi_flags = ip_reply_arg_flowi_flags(arg),
+ };
security_skb_classify_flow(skb, &fl);
rt = ip_route_output_key(sock_net(sk), &fl);
if (IS_ERR(rt))
struct net *net = dev_net(dev);
struct mr_table *mrt;
struct flowi fl = {
- .oif = dev->ifindex,
- .iif = skb->skb_iif,
- .mark = skb->mark,
+ .flowi_oif = dev->ifindex,
+ .flowi_iif = skb->skb_iif,
+ .flowi_mark = skb->mark,
};
int err;
.fl4_dst = rt->rt_key_dst,
.fl4_src = rt->rt_key_src,
.fl4_tos = rt->rt_tos,
- .oif = rt->rt_oif,
- .iif = rt->rt_iif,
- .mark = rt->rt_mark,
+ .flowi_oif = rt->rt_oif,
+ .flowi_iif = rt->rt_iif,
+ .flowi_mark = rt->rt_mark,
};
struct mr_table *mrt;
int err;
if (type == RTN_LOCAL)
fl.fl4_src = iph->saddr;
fl.fl4_tos = RT_TOS(iph->tos);
- fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
- fl.mark = skb->mark;
- fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
+ fl.flowi_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+ fl.flowi_mark = skb->mark;
+ fl.flowi_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
rt = ip_route_output_key(net, &fl);
if (IS_ERR(rt))
return -1;
if (!iov)
continue;
- switch (fl->proto) {
+ switch (fl->flowi_proto) {
case IPPROTO_ICMP:
/* check if one-byte field is readable or not. */
if (iov->iov_base && iov->iov_len < 1)
}
{
- struct flowi fl = { .oif = ipc.oif,
- .mark = sk->sk_mark,
+ struct flowi fl = { .flowi_oif = ipc.oif,
+ .flowi_mark = sk->sk_mark,
.fl4_dst = daddr,
.fl4_src = saddr,
.fl4_tos = tos,
- .proto = inet->hdrincl ? IPPROTO_RAW :
+ .flowi_proto = inet->hdrincl ? IPPROTO_RAW :
sk->sk_protocol,
- .flags = FLOWI_FLAG_CAN_SLEEP,
+ .flowi_flags = FLOWI_FLAG_CAN_SLEEP,
};
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl, msg);
.fl4_dst = rt->rt_key_dst,
.fl4_src = rt->rt_key_src,
.fl4_tos = rt->rt_tos,
- .oif = rt->rt_oif,
- .iif = rt->rt_iif,
- .mark = rt->rt_mark,
+ .flowi_oif = rt->rt_oif,
+ .flowi_iif = rt->rt_iif,
+ .flowi_mark = rt->rt_mark,
};
rcu_read_lock();
/* If a peer entry exists for this destination, we must hook
* it up in order to get at cached metrics.
*/
- if (oldflp && (oldflp->flags & FLOWI_FLAG_PRECOW_METRICS))
+ if (oldflp && (oldflp->flowi_flags & FLOWI_FLAG_PRECOW_METRICS))
create = 1;
rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
return err;
/* put it into the cache */
- hash = rt_hash(daddr, saddr, fl->iif,
+ hash = rt_hash(daddr, saddr, fl->flowi_iif,
rt_genid(dev_net(rth->dst.dev)));
- rth = rt_intern_hash(hash, rth, skb, fl->iif);
+ rth = rt_intern_hash(hash, rth, skb, fl->flowi_iif);
if (IS_ERR(rth))
return PTR_ERR(rth);
return 0;
/*
* Now we are ready to route packet.
*/
- fl.oif = 0;
- fl.iif = dev->ifindex;
- fl.mark = skb->mark;
+ fl.flowi_oif = 0;
+ fl.flowi_iif = dev->ifindex;
+ fl.flowi_mark = skb->mark;
fl.fl4_dst = daddr;
fl.fl4_src = saddr;
fl.fl4_tos = tos;
rth->rt_flags &= ~RTCF_LOCAL;
}
rth->rt_type = res.type;
- hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
- rth = rt_intern_hash(hash, rth, skb, fl.iif);
+ hash = rt_hash(daddr, saddr, fl.flowi_iif, rt_genid(net));
+ rth = rt_intern_hash(hash, rth, skb, fl.flowi_iif);
err = 0;
if (IS_ERR(rth))
err = PTR_ERR(rth);
} else if (type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc_rcu(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
- oldflp->proto))
+ oldflp->flowi_proto))
flags &= ~RTCF_LOCAL;
/* If multicast route do not exist use
* default one, but do not gateway in this case.
rth->rt_key_dst = oldflp->fl4_dst;
rth->rt_tos = tos;
rth->rt_key_src = oldflp->fl4_src;
- rth->rt_oif = oldflp->oif;
- rth->rt_mark = oldflp->mark;
+ rth->rt_oif = oldflp->flowi_oif;
+ rth->rt_mark = oldflp->flowi_mark;
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
rth->rt_iif = 0;
res.r = NULL;
#endif
- fl.oif = oldflp->oif;
- fl.iif = net->loopback_dev->ifindex;
- fl.mark = oldflp->mark;
+ fl.flowi_oif = oldflp->flowi_oif;
+ fl.flowi_iif = net->loopback_dev->ifindex;
+ fl.flowi_mark = oldflp->flowi_mark;
fl.fl4_dst = oldflp->fl4_dst;
fl.fl4_src = oldflp->fl4_src;
fl.fl4_tos = tos & IPTOS_RT_MASK;
of another iface. --ANK
*/
- if (oldflp->oif == 0 &&
+ if (oldflp->flowi_oif == 0 &&
(ipv4_is_multicast(oldflp->fl4_dst) ||
ipv4_is_lbcast(oldflp->fl4_dst))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
Luckily, this hack is good workaround.
*/
- fl.oif = dev_out->ifindex;
+ fl.flowi_oif = dev_out->ifindex;
goto make_route;
}
- if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
+ if (!(oldflp->flowi_flags & FLOWI_FLAG_ANYSRC)) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
if (!__ip_dev_find(net, oldflp->fl4_src, false))
goto out;
}
- if (oldflp->oif) {
- dev_out = dev_get_by_index_rcu(net, oldflp->oif);
+ if (oldflp->flowi_oif) {
+ dev_out = dev_get_by_index_rcu(net, oldflp->flowi_oif);
rth = ERR_PTR(-ENODEV);
if (dev_out == NULL)
goto out;
if (!fl.fl4_dst)
fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
dev_out = net->loopback_dev;
- fl.oif = net->loopback_dev->ifindex;
+ fl.flowi_oif = net->loopback_dev->ifindex;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
if (fib_lookup(net, &fl, &res)) {
res.fi = NULL;
- if (oldflp->oif) {
+ if (oldflp->flowi_oif) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
fl.fl4_src = fl.fl4_dst;
}
dev_out = net->loopback_dev;
- fl.oif = dev_out->ifindex;
+ fl.flowi_oif = dev_out->ifindex;
res.fi = NULL;
flags |= RTCF_LOCAL;
goto make_route;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ if (res.fi->fib_nhs > 1 && fl.flowi_oif == 0)
fib_select_multipath(&res);
else
#endif
- if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
+ if (!res.prefixlen && res.type == RTN_UNICAST && !fl.flowi_oif)
fib_select_default(&res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
dev_out = FIB_RES_DEV(res);
- fl.oif = dev_out->ifindex;
+ fl.flowi_oif = dev_out->ifindex;
make_route:
if (!IS_ERR(rth)) {
unsigned int hash;
- hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+ hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->flowi_oif,
rt_genid(dev_net(dev_out)));
- rth = rt_intern_hash(hash, rth, NULL, oldflp->oif);
+ rth = rt_intern_hash(hash, rth, NULL, oldflp->flowi_oif);
}
out:
if (!rt_caching(net))
goto slow_output;
- hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
+ hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->flowi_oif, rt_genid(net));
rcu_read_lock_bh();
for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
if (rth->rt_key_dst == flp->fl4_dst &&
rth->rt_key_src == flp->fl4_src &&
rt_is_output_route(rth) &&
- rth->rt_oif == flp->oif &&
- rth->rt_mark == flp->mark &&
+ rth->rt_oif == flp->flowi_oif &&
+ rth->rt_mark == flp->flowi_mark &&
!((rth->rt_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK)) &&
net_eq(dev_net(rth->dst.dev), net) &&
if (IS_ERR(rt))
return rt;
- if (flp->proto) {
+ if (flp->flowi_proto) {
if (!flp->fl4_src)
flp->fl4_src = rt->rt_src;
if (!flp->fl4_dst)
.fl4_dst = dst,
.fl4_src = src,
.fl4_tos = rtm->rtm_tos,
- .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
- .mark = mark,
+ .flowi_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
+ .flowi_mark = mark,
};
rt = ip_route_output_key(net, &fl);
* no easy way to do this.
*/
{
- struct flowi fl = { .mark = sk->sk_mark,
- .fl4_dst = ((opt && opt->srr) ?
- opt->faddr : ireq->rmt_addr),
- .fl4_src = ireq->loc_addr,
- .fl4_tos = RT_CONN_FLAGS(sk),
- .proto = IPPROTO_TCP,
- .flags = inet_sk_flowi_flags(sk),
- .fl_ip_sport = th->dest,
- .fl_ip_dport = th->source };
+ struct flowi fl = {
+ .flowi_mark = sk->sk_mark,
+ .fl4_dst = ((opt && opt->srr) ?
+ opt->faddr : ireq->rmt_addr),
+ .fl4_src = ireq->loc_addr,
+ .fl4_tos = RT_CONN_FLAGS(sk),
+ .flowi_proto = IPPROTO_TCP,
+ .flowi_flags = inet_sk_flowi_flags(sk),
+ .fl_ip_sport = th->dest,
+ .fl_ip_dport = th->source,
+ };
security_req_classify_flow(req, &fl);
rt = ip_route_output_key(sock_net(sk), &fl);
if (IS_ERR(rt)) {
rt = (struct rtable *)sk_dst_check(sk, 0);
if (rt == NULL) {
- struct flowi fl = { .oif = ipc.oif,
- .mark = sk->sk_mark,
- .fl4_dst = faddr,
- .fl4_src = saddr,
- .fl4_tos = tos,
- .proto = sk->sk_protocol,
- .flags = (inet_sk_flowi_flags(sk) |
- FLOWI_FLAG_CAN_SLEEP),
- .fl_ip_sport = inet->inet_sport,
- .fl_ip_dport = dport
+ struct flowi fl = {
+ .flowi_oif = ipc.oif,
+ .flowi_mark = sk->sk_mark,
+ .fl4_dst = faddr,
+ .fl4_src = saddr,
+ .fl4_tos = tos,
+ .flowi_proto = sk->sk_protocol,
+ .flowi_flags = (inet_sk_flowi_flags(sk) |
+ FLOWI_FLAG_CAN_SLEEP),
+ .fl_ip_sport = inet->inet_sport,
+ .fl_ip_dport = dport,
};
struct net *net = sock_net(sk);
rt->rt_key_dst = fl->fl4_dst;
rt->rt_key_src = fl->fl4_src;
rt->rt_tos = fl->fl4_tos;
- rt->rt_iif = fl->iif;
- rt->rt_oif = fl->oif;
- rt->rt_mark = fl->mark;
+ rt->rt_iif = fl->flowi_iif;
+ rt->rt_oif = fl->flowi_oif;
+ rt->rt_mark = fl->flowi_mark;
xdst->u.dst.dev = dev;
dev_hold(dev);
u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
memset(fl, 0, sizeof(struct flowi));
- fl->mark = skb->mark;
+ fl->flowi_mark = skb->mark;
if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
switch (iph->protocol) {
break;
}
}
- fl->proto = iph->protocol;
+ fl->flowi_proto = iph->protocol;
fl->fl4_dst = reverse ? iph->saddr : iph->daddr;
fl->fl4_src = reverse ? iph->daddr : iph->saddr;
fl->fl4_tos = iph->tos;
sel->family = AF_INET;
sel->prefixlen_d = 32;
sel->prefixlen_s = 32;
- sel->proto = fl->proto;
- sel->ifindex = fl->oif;
+ sel->proto = fl->flowi_proto;
+ sel->ifindex = fl->flowi_oif;
}
static void
struct flowi fl;
memset(&fl, 0, sizeof(fl));
- fl.proto = sk->sk_protocol;
+ fl.flowi_proto = sk->sk_protocol;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl6_flowlabel = np->flow_label;
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = inet->inet_dport;
fl.fl_ip_sport = inet->inet_sport;
security_sk_classify_flow(sk, &fl);
* destination cache for it.
*/
- fl.proto = sk->sk_protocol;
+ fl.flowi_proto = sk->sk_protocol;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = inet->inet_dport;
fl.fl_ip_sport = inet->inet_sport;
- if (!fl.oif && (addr_type&IPV6_ADDR_MULTICAST))
- fl.oif = np->mcast_oif;
+ if (!fl.flowi_oif && (addr_type&IPV6_ADDR_MULTICAST))
+ fl.flowi_oif = np->mcast_oif;
security_sk_classify_flow(sk, &fl);
mtu_info->ip6m_addr.sin6_family = AF_INET6;
mtu_info->ip6m_addr.sin6_port = 0;
mtu_info->ip6m_addr.sin6_flowinfo = 0;
- mtu_info->ip6m_addr.sin6_scope_id = fl->oif;
+ mtu_info->ip6m_addr.sin6_scope_id = fl->flowi_oif;
ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (src_info->ipi6_ifindex) {
- if (fl->oif && src_info->ipi6_ifindex != fl->oif)
+ if (fl->flowi_oif && src_info->ipi6_ifindex != fl->flowi_oif)
return -EINVAL;
- fl->oif = src_info->ipi6_ifindex;
+ fl->flowi_oif = src_info->ipi6_ifindex;
}
addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
rcu_read_lock();
- if (fl->oif) {
- dev = dev_get_by_index_rcu(net, fl->oif);
+ if (fl->flowi_oif) {
+ dev = dev_get_by_index_rcu(net, fl->flowi_oif);
if (!dev) {
rcu_read_unlock();
return -ENODEV;
sizeof(struct icmp6hdr), skb->csum);
icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
&fl->fl6_dst,
- len, fl->proto,
+ len, fl->flowi_proto,
skb->csum);
} else {
__wsum tmp_csum = 0;
sizeof(struct icmp6hdr), tmp_csum);
icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
&fl->fl6_dst,
- len, fl->proto,
+ len, fl->flowi_proto,
tmp_csum);
}
ip6_push_pending_frames(sk);
mip6_addr_swap(skb);
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_ICMPV6;
+ fl.flowi_proto = IPPROTO_ICMPV6;
ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
if (saddr)
ipv6_addr_copy(&fl.fl6_src, saddr);
- fl.oif = iif;
+ fl.flowi_oif = iif;
fl.fl_icmp_type = type;
fl.fl_icmp_code = code;
security_skb_classify_flow(skb, &fl);
tmp_hdr.icmp6_cksum = 0;
tmp_hdr.icmp6_pointer = htonl(info);
- if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
- fl.oif = np->mcast_oif;
+ if (!fl.flowi_oif && ipv6_addr_is_multicast(&fl.fl6_dst))
+ fl.flowi_oif = np->mcast_oif;
dst = icmpv6_route_lookup(net, skb, sk, &fl);
if (IS_ERR(dst))
tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_ICMPV6;
+ fl.flowi_proto = IPPROTO_ICMPV6;
ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
if (saddr)
ipv6_addr_copy(&fl.fl6_src, saddr);
- fl.oif = skb->dev->ifindex;
+ fl.flowi_oif = skb->dev->ifindex;
fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
security_skb_classify_flow(skb, &fl);
return;
np = inet6_sk(sk);
- if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
- fl.oif = np->mcast_oif;
+ if (!fl.flowi_oif && ipv6_addr_is_multicast(&fl.fl6_dst))
+ fl.flowi_oif = np->mcast_oif;
err = ip6_dst_lookup(sk, &dst, &fl);
if (err)
memset(fl, 0, sizeof(*fl));
ipv6_addr_copy(&fl->fl6_src, saddr);
ipv6_addr_copy(&fl->fl6_dst, daddr);
- fl->proto = IPPROTO_ICMPV6;
+ fl->flowi_proto = IPPROTO_ICMPV6;
fl->fl_icmp_type = type;
fl->fl_icmp_code = 0;
- fl->oif = oif;
+ fl->flowi_oif = oif;
security_sk_classify_flow(sk, fl);
}
struct flowi fl;
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_TCP;
+ fl.flowi_proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
final_p = fl6_update_dst(&fl, np->opt, &final);
ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, &fl);
struct in6_addr *final_p, final;
memset(&fl, 0, sizeof(fl));
- fl.proto = sk->sk_protocol;
+ fl.flowi_proto = sk->sk_protocol;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
fl.fl6_flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_sport = inet->inet_sport;
fl.fl_ip_dport = inet->inet_dport;
security_sk_classify_flow(sk, &fl);
msg.msg_controllen = olen;
msg.msg_control = (void*)(fl->opt+1);
- flowi.oif = 0;
+ flowi.flowi_oif = 0;
err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk,
&junk, &junk);
struct in6_addr *first_hop = &fl->fl6_dst;
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr;
- u8 proto = fl->proto;
+ u8 proto = fl->flowi_proto;
int seg_len = skb->len;
int hlimit = -1;
int tclass = 0;
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
- (fl->oif && fl->oif != dst->dev->ifindex)) {
+ (fl->flowi_oif && fl->flowi_oif != dst->dev->ifindex)) {
dst_release(dst);
dst = NULL;
}
if (final_dst)
ipv6_addr_copy(&fl->fl6_dst, final_dst);
if (can_sleep)
- fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+ fl->flowi_flags |= FLOWI_FLAG_CAN_SLEEP;
return xfrm_lookup(sock_net(sk), dst, fl, sk, 0);
}
if (final_dst)
ipv6_addr_copy(&fl->fl6_dst, final_dst);
if (can_sleep)
- fl->flags |= FLOWI_FLAG_CAN_SLEEP;
+ fl->flowi_flags |= FLOWI_FLAG_CAN_SLEEP;
return xfrm_lookup(sock_net(sk), dst, fl, sk, 0);
}
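The IPv6 call sites follow the same memset-then-assign pattern; a hedged sketch of the converted form, using only members that appear in the surrounding hunks (fl6_dst/fl6_src stay as-is, the generic members gain the prefix):

	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.flowi_proto = sk->sk_protocol;      /* was fl.proto */
	fl.flowi_oif   = sk->sk_bound_dev_if;  /* was fl.oif */
	fl.flowi_mark  = sk->sk_mark;          /* was fl.mark */
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
	security_sk_classify_flow(sk, &fl);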
struct ipv6_txoptions *opt = np->cork.opt;
struct rt6_info *rt = (struct rt6_info *)inet->cork.dst;
struct flowi *fl = &inet->cork.fl;
- unsigned char proto = fl->proto;
+ unsigned char proto = fl->flowi_proto;
int err = 0;
if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
skb->transport_header = skb->network_header;
- proto = fl->proto;
+ proto = fl->flowi_proto;
if (encap_limit >= 0) {
init_tel_txopt(&opt, encap_limit);
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
encap_limit = t->parms.encap_limit;
memcpy(&fl, &t->fl, sizeof (fl));
- fl.proto = IPPROTO_IPIP;
+ fl.flowi_proto = IPPROTO_IPIP;
dsfield = ipv4_get_dsfield(iph);
encap_limit = t->parms.encap_limit;
memcpy(&fl, &t->fl, sizeof (fl));
- fl.proto = IPPROTO_IPV6;
+ fl.flowi_proto = IPPROTO_IPV6;
dsfield = ipv6_get_dsfield(ipv6h);
if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
/* Set up flowi template */
ipv6_addr_copy(&fl->fl6_src, &p->laddr);
ipv6_addr_copy(&fl->fl6_dst, &p->raddr);
- fl->oif = p->link;
+ fl->flowi_oif = p->link;
fl->fl6_flowlabel = 0;
if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
struct net *net = dev_net(skb->dev);
struct mr6_table *mrt;
struct flowi fl = {
- .iif = skb->dev->ifindex,
- .mark = skb->mark,
+ .flowi_iif = skb->dev->ifindex,
+ .flowi_mark = skb->mark,
};
int reg_vif_num;
struct net *net = dev_net(dev);
struct mr6_table *mrt;
struct flowi fl = {
- .oif = dev->ifindex,
- .iif = skb->skb_iif,
- .mark = skb->mark,
+ .flowi_oif = dev->ifindex,
+ .flowi_iif = skb->skb_iif,
+ .flowi_mark = skb->mark,
};
int err;
{
struct mr6_table *mrt;
struct flowi fl = {
- .iif = skb->skb_iif,
- .oif = skb->dev->ifindex,
- .mark = skb->mark,
+ .flowi_iif = skb->skb_iif,
+ .flowi_oif = skb->dev->ifindex,
+ .flowi_mark = skb->mark,
};
if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
ipv6h = ipv6_hdr(skb);
fl = (struct flowi) {
- .oif = vif->link,
+ .flowi_oif = vif->link,
.fl6_dst = ipv6h->daddr,
};
struct net *net = dev_net(skb->dev);
struct mr6_table *mrt;
struct flowi fl = {
- .iif = skb->dev->ifindex,
- .mark = skb->mark,
+ .flowi_iif = skb->dev->ifindex,
+ .flowi_mark = skb->mark,
};
int err;
int junk;
fl.fl6_flowlabel = 0;
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
if (optlen == 0)
goto update;
struct timeval stamp;
int err = 0;
- if (unlikely(fl->proto == IPPROTO_MH &&
+ if (unlikely(fl->flowi_proto == IPPROTO_MH &&
fl->fl_mh_type <= IP6_MH_TYPE_MAX))
goto out;
sizeof(sel.saddr));
sel.prefixlen_s = 128;
sel.family = AF_INET6;
- sel.proto = fl->proto;
+ sel.proto = fl->flowi_proto;
sel.dport = xfrm_flowi_dport(fl);
if (sel.dport)
sel.dport_mask = htons(~0);
sel.sport = xfrm_flowi_sport(fl);
if (sel.sport)
sel.sport_mask = htons(~0);
- sel.ifindex = fl->oif;
+ sel.ifindex = fl->flowi_oif;
err = km_report(net, IPPROTO_DSTOPTS, &sel,
(hao ? (xfrm_address_t *)&hao->addr : NULL));
struct ipv6hdr *iph = ipv6_hdr(skb);
struct dst_entry *dst;
struct flowi fl = {
- .oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
- .mark = skb->mark,
+ .flowi_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
+ .flowi_mark = skb->mark,
.fl6_dst = iph->daddr,
.fl6_src = iph->saddr,
};
}
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_TCP;
+ fl.flowi_proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_src, &oip6h->daddr);
ipv6_addr_copy(&fl.fl6_dst, &oip6h->saddr);
fl.fl_ip_sport = otcph.dest;
csum = csum_ipv6_magic(&fl->fl6_src,
&fl->fl6_dst,
- total_len, fl->proto, tmp_csum);
+ total_len, fl->flowi_proto, tmp_csum);
- if (csum == 0 && fl->proto == IPPROTO_UDP)
+ if (csum == 0 && fl->flowi_proto == IPPROTO_UDP)
csum = CSUM_MANGLED_0;
if (skb_store_bits(skb, offset, &csum, 2))
if (!iov)
continue;
- switch (fl->proto) {
+ switch (fl->flowi_proto) {
case IPPROTO_ICMPV6:
/* check if one-byte field is readable or not. */
if (iov->iov_base && iov->iov_len < 1)
*/
memset(&fl, 0, sizeof(fl));
- fl.mark = sk->sk_mark;
+ fl.flowi_mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
- fl.oif = sin6->sin6_scope_id;
+ fl.flowi_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl.fl6_flowlabel = np->flow_label;
}
- if (fl.oif == 0)
- fl.oif = sk->sk_bound_dev_if;
+ if (fl.flowi_oif == 0)
+ fl.flowi_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
- fl.proto = proto;
+ fl.flowi_proto = proto;
err = rawv6_probe_proto_opt(&fl, msg);
if (err)
goto out;
final_p = fl6_update_dst(&fl, opt, &final);
- if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
- fl.oif = np->mcast_oif;
+ if (!fl.flowi_oif && ipv6_addr_is_multicast(&fl.fl6_dst))
+ fl.flowi_oif = np->mcast_oif;
security_sk_classify_flow(sk, &fl);
dst = ip6_dst_lookup_flow(sk, &fl, final_p, true);
fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
restart:
rt = fn->leaf;
- rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
+ rt = rt6_device_match(net, rt, &fl->fl6_src, fl->flowi_oif, flags);
BACKTRACK(net, &fl->fl6_src);
out:
dst_use(&rt->dst, jiffies);
const struct in6_addr *saddr, int oif, int strict)
{
struct flowi fl = {
- .oif = oif,
+ .flowi_oif = oif,
.fl6_dst = *daddr,
};
struct dst_entry *dst;
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
struct flowi *fl, int flags)
{
- return ip6_pol_route(net, table, fl->iif, fl, flags);
+ return ip6_pol_route(net, table, fl->flowi_iif, fl, flags);
}
void ip6_route_input(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
int flags = RT6_LOOKUP_F_HAS_SADDR;
struct flowi fl = {
- .iif = skb->dev->ifindex,
+ .flowi_iif = skb->dev->ifindex,
.fl6_dst = iph->daddr,
.fl6_src = iph->saddr,
.fl6_flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
- .mark = skb->mark,
- .proto = iph->nexthdr,
+ .flowi_mark = skb->mark,
+ .flowi_proto = iph->nexthdr,
};
if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
struct flowi *fl, int flags)
{
- return ip6_pol_route(net, table, fl->oif, fl, flags);
+ return ip6_pol_route(net, table, fl->flowi_oif, fl, flags);
}
struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
continue;
if (!(rt->rt6i_flags & RTF_GATEWAY))
continue;
- if (fl->oif != rt->rt6i_dev->ifindex)
+ if (fl->flowi_oif != rt->rt6i_dev->ifindex)
continue;
if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
continue;
struct net *net = dev_net(dev);
struct ip6rd_flowi rdfl = {
.fl = {
- .oif = dev->ifindex,
+ .flowi_oif = dev->ifindex,
.fl6_dst = *dest,
.fl6_src = *src,
},
iif = nla_get_u32(tb[RTA_IIF]);
if (tb[RTA_OIF])
- fl.oif = nla_get_u32(tb[RTA_OIF]);
+ fl.flowi_oif = nla_get_u32(tb[RTA_OIF]);
if (iif) {
struct net_device *dev;
struct in6_addr *final_p, final;
struct flowi fl;
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_TCP;
+ fl.flowi_proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl, np->opt, &final);
ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_sk(sk)->inet_sport;
security_req_classify_flow(req, &fl);
if (!ipv6_addr_any(&np->rcv_saddr))
saddr = &np->rcv_saddr;
- fl.proto = IPPROTO_TCP;
+ fl.flowi_proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src,
(saddr ? saddr : &np->saddr));
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = usin->sin6_port;
fl.fl_ip_sport = inet->inet_sport;
for now.
*/
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_TCP;
+ fl.flowi_proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
ipv6_addr_copy(&fl.fl6_src, &np->saddr);
- fl.oif = sk->sk_bound_dev_if;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = sk->sk_bound_dev_if;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = inet->inet_dport;
fl.fl_ip_sport = inet->inet_sport;
security_skb_classify_flow(skb, &fl);
int err;
memset(&fl, 0, sizeof(fl));
- fl.proto = IPPROTO_TCP;
+ fl.flowi_proto = IPPROTO_TCP;
ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
fl.fl6_flowlabel = 0;
- fl.oif = treq->iif;
- fl.mark = sk->sk_mark;
+ fl.flowi_oif = treq->iif;
+ fl.flowi_mark = sk->sk_mark;
fl.fl_ip_dport = inet_rsk(req)->rmt_port;
fl.fl_ip_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, &fl);
__tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
- fl.proto = IPPROTO_TCP;
- fl.oif = inet6_iif(skb);
+ fl.flowi_proto = IPPROTO_TCP;
+ fl.flowi_oif = inet6_iif(skb);
fl.fl_ip_dport = t1->dest;
fl.fl_ip_sport = t1->source;
security_skb_classify_flow(skb, &fl);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
- up->len, fl->proto, csum );
+ up->len, fl->flowi_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
- fl.oif = sin6->sin6_scope_id;
+ fl.flowi_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
connected = 1;
}
- if (!fl.oif)
- fl.oif = sk->sk_bound_dev_if;
+ if (!fl.flowi_oif)
+ fl.flowi_oif = sk->sk_bound_dev_if;
- if (!fl.oif)
- fl.oif = np->sticky_pktinfo.ipi6_ifindex;
+ if (!fl.flowi_oif)
+ fl.flowi_oif = np->sticky_pktinfo.ipi6_ifindex;
- fl.mark = sk->sk_mark;
+ fl.flowi_mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
- fl.proto = sk->sk_protocol;
+ fl.flowi_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
ipv6_addr_copy(&fl.fl6_dst, daddr);
else
if (final_p)
connected = 0;
- if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
- fl.oif = np->mcast_oif;
+ if (!fl.flowi_oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
+ fl.flowi_oif = np->mcast_oif;
connected = 0;
}
u8 nexthdr = nh[IP6CB(skb)->nhoff];
memset(fl, 0, sizeof(struct flowi));
- fl->mark = skb->mark;
+ fl->flowi_mark = skb->mark;
ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
fl->fl_ip_sport = ports[!!reverse];
fl->fl_ip_dport = ports[!reverse];
}
- fl->proto = nexthdr;
+ fl->flowi_proto = nexthdr;
return;
case IPPROTO_ICMPV6:
fl->fl_icmp_type = icmp[0];
fl->fl_icmp_code = icmp[1];
}
- fl->proto = nexthdr;
+ fl->flowi_proto = nexthdr;
return;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
fl->fl_mh_type = mh->ip6mh_type;
}
- fl->proto = nexthdr;
+ fl->flowi_proto = nexthdr;
return;
#endif
case IPPROTO_COMP:
default:
fl->fl_ipsec_spi = 0;
- fl->proto = nexthdr;
+ fl->flowi_proto = nexthdr;
return;
}
}
sel->family = AF_INET6;
sel->prefixlen_d = 128;
sel->prefixlen_s = 128;
- sel->proto = fl->proto;
- sel->ifindex = fl->oif;
+ sel->proto = fl->flowi_proto;
+ sel->ifindex = fl->flowi_oif;
}
static void
{
struct rt6_info *rt;
struct flowi fl = {
- .oif = 0,
+ .flowi_oif = 0,
.fl6_dst = *addr,
.fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
};
.fl4_dst = iph->daddr,
.fl4_src = iph->saddr,
.fl4_tos = RT_TOS(iph->tos),
- .mark = skb->mark,
+ .flowi_mark = skb->mark,
};
rt = ip_route_output_key(net, &fl);
if (info->priv) {
if (info->priv->oif == -1)
return false;
- fl.oif = info->priv->oif;
+ fl.flowi_oif = info->priv->oif;
}
fl.fl4_dst = info->gw.ip;
fl.fl4_tos = RT_TOS(iph->tos);
if (info->priv) {
if (info->priv->oif == -1)
return false;
- fl.oif = info->priv->oif;
+ fl.flowi_oif = info->priv->oif;
}
fl.fl6_dst = info->gw.in6;
fl.fl6_flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
memset(&fl, 0, sizeof(fl));
- fl.proto = sk->sk_protocol;
+ fl.flowi_proto = sk->sk_protocol;
/* Fill in the dest address from the route entry passed with the skb
* and the source address from the transport.
fl.fl6_flowlabel = np->flow_label;
IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
if (ipv6_addr_type(&fl.fl6_src) & IPV6_ADDR_LINKLOCAL)
- fl.oif = transport->saddr.v6.sin6_scope_id;
+ fl.flowi_oif = transport->saddr.v6.sin6_scope_id;
else
- fl.oif = sk->sk_bound_dev_if;
+ fl.flowi_oif = sk->sk_bound_dev_if;
if (np->opt && np->opt->srcrt) {
struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
memset(&fl, 0, sizeof(fl));
ipv6_addr_copy(&fl.fl6_dst, &daddr->v6.sin6_addr);
if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
- fl.oif = daddr->v6.sin6_scope_id;
+ fl.flowi_oif = daddr->v6.sin6_scope_id;
SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl.fl6_dst);
memset(&fl, 0x0, sizeof(struct flowi));
fl.fl4_dst = daddr->v4.sin_addr.s_addr;
fl.fl_ip_dport = daddr->v4.sin_port;
- fl.proto = IPPROTO_SCTP;
+ fl.flowi_proto = IPPROTO_SCTP;
if (asoc) {
fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
- fl.oif = asoc->base.sk->sk_bound_dev_if;
+ fl.flowi_oif = asoc->base.sk->sk_bound_dev_if;
fl.fl_ip_sport = htons(asoc->base.bind_addr.port);
}
if (saddr) {
addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
- (fl->proto == sel->proto || !sel->proto) &&
- (fl->oif == sel->ifindex || !sel->ifindex);
+ (fl->flowi_proto == sel->proto || !sel->proto) &&
+ (fl->flowi_oif == sel->ifindex || !sel->ifindex);
}
static inline int
addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
- (fl->proto == sel->proto || !sel->proto) &&
- (fl->oif == sel->ifindex || !sel->ifindex);
+ (fl->flowi_proto == sel->proto || !sel->proto) &&
+ (fl->flowi_oif == sel->ifindex || !sel->ifindex);
}
int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
int match, ret = -ESRCH;
if (pol->family != family ||
- (fl->mark & pol->mark.m) != pol->mark.v ||
+ (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
pol->type != type)
return ret;
match = xfrm_selector_match(sel, fl, family);
if (match)
- ret = security_xfrm_policy_lookup(pol->security, fl->secid,
+ ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
dir);
return ret;
goto out;
}
err = security_xfrm_policy_lookup(pol->security,
- fl->secid,
+ fl->flowi_secid,
policy_to_flow_dir(dir));
if (!err)
xfrm_pol_hold(pol);
return make_blackhole(net, family, dst_orig);
}
- if (fl->flags & FLOWI_FLAG_CAN_SLEEP) {
+ if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&net->xfrm.km_waitq, &wait);
return -EAFNOSUPPORT;
afinfo->decode_session(skb, fl, reverse);
- err = security_xfrm_decode_session(skb, &fl->secid);
+ err = security_xfrm_decode_session(skb, &fl->flowi_secid);
xfrm_policy_put_afinfo(afinfo);
return err;
}
xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
- error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
+ error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
if (error) {
x->km.state = XFRM_STATE_DEAD;
to_put = x;
void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
{
- security_ops->sk_getsecid(sk, &fl->secid);
+ security_ops->sk_getsecid(sk, &fl->flowi_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);
void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
{
- int rc = security_ops->xfrm_decode_session(skb, &fl->secid, 0);
+ int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0);
BUG_ON(rc);
}
static void selinux_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
- fl->secid = req->secid;
+ fl->flowi_secid = req->secid;
}
static int selinux_tun_dev_create(void)
state_sid = x->security->ctx_sid;
- if (fl->secid != state_sid)
+ if (fl->flowi_secid != state_sid)
return 0;
- rc = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
+ rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION,
ASSOCIATION__SENDTO,
NULL)? 0:1;