struct in6_addr dst;
};

+/**
+ * struct flow_dissector_key_tipc_addrs:
+ * @srcnode: source node address
+ */
+struct flow_dissector_key_tipc_addrs {
+ __be32 srcnode;
+};
+
/**
* struct flow_dissector_key_addrs:
* @v4addrs: IPv4 addresses
union {
struct flow_dissector_key_ipv4_addrs v4addrs;
struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_tipc_addrs tipcaddrs;
};
};
FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, /* struct flow_dissector_key_addrs */
FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
FLOW_DISSECTOR_KEY_MAX,
};
key_control->thoff = (u16)nhoff;
if (skb_flow_dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS)) {
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
key_addrs = skb_flow_dissector_target(flow_dissector,
- FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS,
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS,
target_container);
- key_addrs->v4addrs.src = hdr->srcnode;
- key_addrs->v4addrs.dst = 0;
- key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
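+ /* only the TIPC source node is dissected; store it and tag the addr_type */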
+ key_addrs->tipcaddrs.srcnode = hdr->srcnode;
+ key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
}
return true;
}
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
diff -= sizeof(flow->addrs.v6addrs);
break;
+ case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
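+ /* only the 4-byte srcnode stays in the hashed region of the addrs union */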
+ diff -= sizeof(flow->addrs.tipcaddrs);
+ break;
}
return (sizeof(*flow) - diff) / sizeof(u32);
}
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
return (__force __be32)ipv6_addr_hash(
&flow->addrs.v6addrs.src);
+ case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
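+ /* the TIPC source node stands in for the 32-bit source address */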
+ return flow->addrs.tipcaddrs.srcnode;
default:
return 0;
}
.key_id = FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS,
.offset = offsetof(struct flow_keys, addrs.v4addrs),
},
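+ /* include the TIPC node address in the default flow_keys dissection */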
+ {
+ .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.tipcaddrs),
+ },
{
.key_id = FLOW_DISSECTOR_KEY_PORTS,
.offset = offsetof(struct flow_keys, ports),