iplen += sizeof(*uhdr);
ihdr->tot_len = htons(iplen);
ihdr->frag_off = 0;
- ihdr->saddr = 0;
+ ihdr->saddr = htonl(attr->ip_src);
ihdr->daddr = htonl(attr->ip_dst);
ihdr->tos = 0;
ihdr->id = 0;
return stmmac_test_vlanoff_common(priv, true);
}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask)
+{
+ struct flow_dissector_key_ipv4_addrs key, mask;
+ unsigned long dummy_cookie = 0xdeadbeef;
+ struct stmmac_packet_attrs attr = { };
+ struct flow_dissector *dissector;
+ struct flow_cls_offload *cls;
+ struct flow_rule *rule;
+ int ret;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.l3l4fnum)
+ return -EOPNOTSUPP;
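+ /* RSS steering can interfere with the filter result, so disable it
+ * while the test runs; it is restored in the cleanup_rss path.
+ */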
+ if (priv->rss.enable) {
+ struct stmmac_rss rss = { .enable = false, };
+
+ stmmac_rss_configure(priv, priv->hw, &rss,
+ priv->plat->rx_queues_to_use);
+ }
+
+ dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
+ if (!dissector) {
+ ret = -ENOMEM;
+ goto cleanup_rss;
+ }
+
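+ /* Minimal dissector: the rule matches only on IPv4 source/destination
+ * addresses, placed at offset 0 of the key/mask buffers.
+ */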
+ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls) {
+ ret = -ENOMEM;
+ goto cleanup_dissector;
+ }
+
+ cls->common.chain_index = 0;
+ cls->command = FLOW_CLS_REPLACE;
+ cls->cookie = dummy_cookie;
+
+ rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
+ if (!rule) {
+ ret = -ENOMEM;
+ goto cleanup_cls;
+ }
+
+ rule->match.dissector = dissector;
+ rule->match.key = (void *)&key;
+ rule->match.mask = (void *)&mask;
+
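+ /* Addresses go in network byte order; the caller passes an all-ones
+ * mask for the field under test and zero for the other one.
+ */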
+ key.src = htonl(src);
+ key.dst = htonl(dst);
+ mask.src = src_mask;
+ mask.dst = dst_mask;
+
+ cls->rule = rule;
+
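+ /* Single action: drop every frame that matches the rule. */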
+ rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.num_entries = 1;
+
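+ /* Build a matching packet: our own MAC address plus the source and
+ * destination IP addresses the filter was programmed with.
+ */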
+ attr.dst = priv->dev->dev_addr;
+ attr.ip_dst = dst;
+ attr.ip_src = src;
+
+ /* Shall receive packet: the filter is not installed yet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup_rule;
+
+ ret = stmmac_tc_setup_cls(priv, priv, cls);
+ if (ret)
+ goto cleanup_rule;
+
+ /* Shall NOT receive packet: a failing loopback here means the
+ * filter dropped the frame, which is the expected result
+ */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+ cls->command = FLOW_CLS_DESTROY;
+ stmmac_tc_setup_cls(priv, priv, cls);
+cleanup_rule:
+ kfree(rule);
+cleanup_cls:
+ kfree(cls);
+cleanup_dissector:
+ kfree(dissector);
+cleanup_rss:
+ if (priv->rss.enable) {
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+ }
+
+ return ret;
+}
+#else
+static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
+{
+ u32 addr = 0x10203040;
+
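+ /* Filter on destination address only: full mask on dst, none on src. */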
+ return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
+}
+
+static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
+{
+ u32 addr = 0x10203040;
+
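+ /* Filter on source address only: full mask on src, none on dst. */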
+ return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask, bool udp)
+{
+ struct {
+ struct flow_dissector_key_basic bkey;
+ struct flow_dissector_key_ports key;
+ } __aligned(BITS_PER_LONG / 8) keys = {};
+ struct {
+ struct flow_dissector_key_basic bmask;
+ struct flow_dissector_key_ports mask;
+ } __aligned(BITS_PER_LONG / 8) masks = {};
+ unsigned long dummy_cookie = 0xdeadbeef;
+ struct stmmac_packet_attrs attr = { };
+ struct flow_dissector *dissector;
+ struct flow_cls_offload *cls;
+ struct flow_rule *rule;
+ int ret;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.l3l4fnum)
+ return -EOPNOTSUPP;
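+ /* RSS steering can interfere with the filter result, so disable it
+ * while the test runs; it is restored in the cleanup_rss path.
+ */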
+ if (priv->rss.enable) {
+ struct stmmac_rss rss = { .enable = false, };
+
+ stmmac_rss_configure(priv, priv->hw, &rss,
+ priv->plat->rx_queues_to_use);
+ }
+
+ dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
+ if (!dissector) {
+ ret = -ENOMEM;
+ goto cleanup_rss;
+ }
+
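+ /* Two match levels here: BASIC carries the L4 protocol (TCP/UDP) and
+ * PORTS carries the port numbers; the offsets index into keys/masks.
+ */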
+ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
+ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
+ dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
+ dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls) {
+ ret = -ENOMEM;
+ goto cleanup_dissector;
+ }
+
+ cls->common.chain_index = 0;
+ cls->command = FLOW_CLS_REPLACE;
+ cls->cookie = dummy_cookie;
+
+ rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
+ if (!rule) {
+ ret = -ENOMEM;
+ goto cleanup_cls;
+ }
+
+ rule->match.dissector = dissector;
+ rule->match.key = (void *)&keys;
+ rule->match.mask = (void *)&masks;
+
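+ /* ip_proto selects TCP or UDP; ports are big endian and, as in the
+ * L3 test, only the port under test carries an all-ones mask.
+ */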
+ keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
+ keys.key.src = htons(src);
+ keys.key.dst = htons(dst);
+ masks.mask.src = src_mask;
+ masks.mask.dst = dst_mask;
+
+ cls->rule = rule;
+
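+ /* Same single DROP action as in the L3 test. */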
+ rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.num_entries = 1;
+
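+ /* Build a matching packet: our own MAC address, the chosen L4
+ * protocol and the source/destination ports under test.
+ */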
+ attr.dst = priv->dev->dev_addr;
+ attr.tcp = !udp;
+ attr.sport = src;
+ attr.dport = dst;
+ attr.ip_dst = 0;
+
+ /* Shall receive packet: the filter is not installed yet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup_rule;
+
+ ret = stmmac_tc_setup_cls(priv, priv, cls);
+ if (ret)
+ goto cleanup_rule;
+
+ /* Shall NOT receive packet: a failing loopback here means the
+ * filter dropped the frame, which is the expected result
+ */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+ cls->command = FLOW_CLS_DESTROY;
+ stmmac_tc_setup_cls(priv, priv, cls);
+cleanup_rule:
+ kfree(rule);
+cleanup_cls:
+ kfree(cls);
+cleanup_dissector:
+ kfree(dissector);
+cleanup_rss:
+ if (priv->rss.enable) {
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+ }
+
+ return ret;
+}
+#else
+static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask, bool udp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
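+ /* Arbitrary test port; the last argument selects UDP (true) or TCP. */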
+ return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
+}
+
+static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
+}
+
+static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
+}
+
+static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
+}
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
.name = "SVLAN TX Insertion ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_svlanoff,
+ }, {
+ .name = "L3 DA Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l3filt_da,
+ }, {
+ .name = "L3 SA Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l3filt_sa,
+ }, {
+ .name = "L4 DA TCP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_da_tcp,
+ }, {
+ .name = "L4 SA TCP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_sa_tcp,
+ }, {
+ .name = "L4 DA UDP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_da_udp,
+ }, {
+ .name = "L4 SA UDP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_sa_udp,
},
};