struct flow_offload_tuple tuple;
};
-#define FLOW_OFFLOAD_SNAT 0x1
-#define FLOW_OFFLOAD_DNAT 0x2
-#define FLOW_OFFLOAD_TEARDOWN 0x8
-#define FLOW_OFFLOAD_HW 0x10
-#define FLOW_OFFLOAD_HW_DYING 0x20
-#define FLOW_OFFLOAD_HW_DEAD 0x40
+enum nf_flow_flags {
+ NF_FLOW_SNAT,
+ NF_FLOW_DNAT,
+ NF_FLOW_TEARDOWN,
+ NF_FLOW_HW,
+ NF_FLOW_HW_DYING,
+ NF_FLOW_HW_DEAD,
+};
enum flow_offload_type {
NF_FLOW_OFFLOAD_UNSPEC = 0,
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
struct nf_conn *ct;
- u16 flags;
+ unsigned long flags;
u16 type;
u32 timeout;
struct rcu_head rcu_head;
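
The header change has two parts that belong together: the flag masks become plain enumerators, i.e. bit numbers rather than mask values, and flags widens from u16 to unsigned long, because the kernel bitops (set_bit(), test_bit() and friends) take a bit number plus a pointer to an unsigned long bitmap. Note that the old masks skipped 0x4, so the absolute positions shift: teardown moves from bit 3 (mask 0x8) to bit 2 (mask 0x4). That is harmless, since every access now goes through the symbolic names. A minimal userspace sketch of the mask-to-bit-number correspondence (illustration only, not kernel code):

#include <assert.h>

/* Same enumerators as the patch; the values are bit numbers, not masks. */
enum nf_flow_flags {
	NF_FLOW_SNAT,		/* bit 0 <-> old mask 0x1  */
	NF_FLOW_DNAT,		/* bit 1 <-> old mask 0x2  */
	NF_FLOW_TEARDOWN,	/* bit 2; old mask was 0x8  */
	NF_FLOW_HW,		/* bit 3; old mask was 0x10 */
	NF_FLOW_HW_DYING,	/* bit 4; old mask was 0x20 */
	NF_FLOW_HW_DEAD,	/* bit 5; old mask was 0x40 */
};

int main(void)
{
	assert((1UL << NF_FLOW_SNAT) == 0x1);
	assert((1UL << NF_FLOW_DNAT) == 0x2);
	/* The numbering is compacted: the unused 0x4 slot is gone,
	 * so NF_FLOW_TEARDOWN lands on bit 2, not on the old 0x8. */
	assert((1UL << NF_FLOW_TEARDOWN) == 0x4);
	return 0;
}
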
flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
if (ct->status & IPS_SRC_NAT)
- flow->flags |= FLOW_OFFLOAD_SNAT;
+ __set_bit(NF_FLOW_SNAT, &flow->flags);
if (ct->status & IPS_DST_NAT)
- flow->flags |= FLOW_OFFLOAD_DNAT;
+ __set_bit(NF_FLOW_DNAT, &flow->flags);
return flow;
if (nf_flow_has_expired(flow))
flow_offload_fixup_ct(flow->ct);
- else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+ else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
flow_offload_fixup_ct_timeout(flow->ct);
flow_offload_free(flow);
void flow_offload_teardown(struct flow_offload *flow)
{
- flow->flags |= FLOW_OFFLOAD_TEARDOWN;
+ set_bit(NF_FLOW_TEARDOWN, &flow->flags);
flow_offload_fixup_ct_state(flow->ct);
}
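
The patch is deliberate about which bitop it uses where. flow_offload_alloc() above takes the non-atomic __set_bit() because the flow has not been inserted into the table yet, so no other CPU can see it; flow_offload_teardown() takes the atomic set_bit() because it runs against a published flow that the packet path and the garbage collector read concurrently, and a plain "flags |= mask" is a read-modify-write that can lose a concurrent update. A userspace emulation of the two primitives with compiler builtins (for illustration; this is not the kernel implementation):

/* Non-atomic variant: plain read-modify-write. Safe only while the
 * bitmap is private to one context, as in flow_offload_alloc(). */
static inline void emulated___set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

/* Atomic variant: an atomic OR cannot lose a concurrent setter, which
 * is what a published flow needs in flow_offload_teardown(). */
static inline void emulated_set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

/* Read side: a single load suffices; a set flag stays set. */
static inline int emulated_test_bit(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
	unsigned long flags = 0;

	emulated___set_bit(0, &flags);	/* alloc-time: flow still private */
	emulated_set_bit(2, &flags);	/* teardown: flow already shared */
	return !(emulated_test_bit(0, &flags) && emulated_test_bit(2, &flags));
}
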
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
- if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+ if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
return NULL;
if (unlikely(nf_ct_is_dying(flow->ct)))
struct nf_flowtable *flow_table = data;
if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
- (flow->flags & FLOW_OFFLOAD_TEARDOWN)) {
- if (flow->flags & FLOW_OFFLOAD_HW) {
- if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
+ test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+ if (test_bit(NF_FLOW_HW, &flow->flags)) {
+ if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
nf_flow_offload_del(flow_table, flow);
- else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
+ else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
flow_offload_del(flow_table, flow);
} else {
flow_offload_del(flow_table, flow);
}
- } else if (flow->flags & FLOW_OFFLOAD_HW) {
+ } else if (test_bit(NF_FLOW_HW, &flow->flags)) {
nf_flow_offload_stats(flow_table, flow);
}
}
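
The garbage-collector hunk above encodes the hardware teardown handshake: a dead or torn-down flow with NF_FLOW_HW set is first queued for hardware removal (which marks it NF_FLOW_HW_DYING), and it is only freed once the offload work has acknowledged the deletion by setting NF_FLOW_HW_DEAD; software-only flows are freed immediately. The same branch structure restated as a pure decision function (gc_step() and gc_action are hypothetical helpers for illustration, not part of the patch):

enum nf_flow_flags { NF_FLOW_SNAT, NF_FLOW_DNAT, NF_FLOW_TEARDOWN,
		     NF_FLOW_HW, NF_FLOW_HW_DYING, NF_FLOW_HW_DEAD };

enum gc_action { GC_KEEP, GC_QUEUE_HW_DEL, GC_FREE, GC_STATS };

/* Mirrors nf_flow_offload_gc_step() above, one branch per return. */
static enum gc_action gc_step(unsigned long flags, int expired_or_ct_dying)
{
	if (!expired_or_ct_dying && !(flags & (1UL << NF_FLOW_TEARDOWN)))
		return (flags & (1UL << NF_FLOW_HW)) ? GC_STATS : GC_KEEP;

	if (!(flags & (1UL << NF_FLOW_HW)))
		return GC_FREE;			/* software-only flow */
	if (!(flags & (1UL << NF_FLOW_HW_DYING)))
		return GC_QUEUE_HW_DEL;		/* start hardware removal */
	if (flags & (1UL << NF_FLOW_HW_DEAD))
		return GC_FREE;			/* driver acknowledged deletion */
	return GC_KEEP;				/* removal still in flight */
}

int main(void)
{
	unsigned long flags = (1UL << NF_FLOW_TEARDOWN) | (1UL << NF_FLOW_HW);

	/* torn down, offloaded, not yet dying: gc queues hardware removal */
	return gc_step(flags, 0) != GC_QUEUE_HW_DEL;
}
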
{
struct iphdr *iph = ip_hdr(skb);
- if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
(nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
return -1;
- if (flow->flags & FLOW_OFFLOAD_DNAT &&
+ if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
(nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
return -1;
struct ipv6hdr *ip6h = ipv6_hdr(skb);
unsigned int thoff = sizeof(*ip6h);
- if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
(nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
return -1;
- if (flow->flags & FLOW_OFFLOAD_DNAT &&
+ if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
(nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
return -1;
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
- if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv4_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
}
- if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir, flow_rule);
}
- if (flow->flags & FLOW_OFFLOAD_SNAT ||
- flow->flags & FLOW_OFFLOAD_DNAT)
+ if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
+ test_bit(NF_FLOW_DNAT, &flow->flags))
flow_offload_ipv4_checksum(net, flow, flow_rule);
flow_offload_redirect(flow, dir, flow_rule);
flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
return -1;
- if (flow->flags & FLOW_OFFLOAD_SNAT) {
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
flow_offload_ipv6_snat(net, flow, dir, flow_rule);
flow_offload_port_snat(net, flow, dir, flow_rule);
}
- if (flow->flags & FLOW_OFFLOAD_DNAT) {
+ if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
flow_offload_port_dnat(net, flow, dir, flow_rule);
}
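
Both the software fast path above (the IPv4 and IPv6 rewrite helpers) and the hardware rule builders gate the NAT edits on the same two bits, and the readers need no locking: NF_FLOW_SNAT and NF_FLOW_DNAT are set once in flow_offload_alloc(), before the flow is published, and never change afterwards. To make the bit arithmetic concrete, the SNAT-or-DNAT condition guarding the IPv4 checksum fixup above could equivalently test the whole word at once (a hypothetical rewrite, shown for illustration only):

enum { NF_FLOW_SNAT = 0, NF_FLOW_DNAT = 1 };	/* as in the enum above */

/* Equivalent of the two test_bit() calls before
 * flow_offload_ipv4_checksum(): true if either NAT bit is set. */
static inline int nat_rewrite_needed(unsigned long flags)
{
	return !!(flags & ((1UL << NF_FLOW_SNAT) | (1UL << NF_FLOW_DNAT)));
}
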
list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
- offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
+ set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}
static int flow_offload_rule_add(struct flow_offload_work *offload,
case FLOW_CLS_REPLACE:
ret = flow_offload_work_add(offload);
if (ret < 0)
- offload->flow->flags &= ~FLOW_OFFLOAD_HW;
+ __clear_bit(NF_FLOW_HW, &offload->flow->flags);
break;
case FLOW_CLS_DESTROY:
flow_offload_work_del(offload);
if (!offload)
return;
- flow->flags |= FLOW_OFFLOAD_HW;
+ __set_bit(NF_FLOW_HW, &flow->flags);
flow_offload_queue_work(offload);
}
if (!offload)
return;
- flow->flags |= FLOW_OFFLOAD_HW_DYING;
+ set_bit(NF_FLOW_HW_DYING, &flow->flags);
flow_offload_queue_work(offload);
}
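
The final hunks round out the atomic-versus-non-atomic split: nf_flow_offload_add() marks the flow with the non-atomic __set_bit(NF_FLOW_HW) before queueing the work, at a point where the flag is evidently treated as uncontended, while nf_flow_offload_del() uses the atomic set_bit(NF_FLOW_HW_DYING) since the garbage collector may be inspecting the same word; a failed FLOW_CLS_REPLACE is rolled back with __clear_bit(NF_FLOW_HW) from the work handler. A compressed sketch of the resulting flag lifecycle (demo code with the bitops emulated; not the kernel API):

#include <stdio.h>

enum { NF_FLOW_HW = 3, NF_FLOW_HW_DYING = 4, NF_FLOW_HW_DEAD = 5 };

static unsigned long flags;

static void set_bit_atomic(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

int main(void)
{
	flags |= 1UL << NF_FLOW_HW;		  /* add: flag set before work runs */
	set_bit_atomic(NF_FLOW_HW_DYING, &flags); /* del queued: gc may race */
	set_bit_atomic(NF_FLOW_HW_DEAD, &flags);  /* work done: gc may free */

	printf("flags = %#lx\n", flags);	  /* 0x38 = HW|HW_DYING|HW_DEAD */
	return 0;
}
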