From 81676eb0adad9931279470559107f75741ba957c Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Wed, 6 May 2020 15:33:03 -0600
Subject: [PATCH 099/124] wireguard: socket: remove errant restriction on
 looping to self

commit b673e24aad36981f327a6570412ffa7754de8911 upstream.

It's already possible to create two different interfaces and loop
packets between them. This has always been possible with tunnels in the
kernel, and isn't specific to wireguard. Therefore, the networking stack
already needs to deal with that. At the very least, the packet winds up
exceeding the MTU and is discarded at that point. So, since this is
already something that happens, there's no need to forbid the not very
exceptional case of routing a packet back to the same interface; this
loop is no different than others, and we shouldn't special case it, but
rather rely on generic handling of loops in general. This also makes it
easier to do interesting things with wireguard such as onion routing.

At the same time, we add a selftest for this, ensuring that both onion
routing works and infinite routing loops do not crash the kernel. We
also add a test case for wireguard interfaces nesting packets and
sending traffic between each other, as well as the loop in this case
too. We make sure to send some throughput-heavy traffic for this use
case, to stress out any possible recursion issues with the locks around
workqueues.

Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/net/wireguard/socket.c             | 12 -----
 tools/testing/selftests/wireguard/netns.sh | 54 ++++++++++++++++++++--
 2 files changed, 51 insertions(+), 15 deletions(-)

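As a quick illustration of what this change allows (not part of the upstream
patch; the authoritative coverage is the netns.sh hunk below), the sketch
below configures a single wireguard interface whose peer endpoint is only
reachable through that same interface. Interface name, addresses, port and
keys are placeholders, and it needs root plus a wireguard-enabled kernel.
Before this patch the transmit path in send4()/send6() rejected this
situation with -ELOOP; afterwards the looping packets are left to the
stack's generic handling and are eventually dropped (e.g. once they exceed
the MTU).

  # Illustrative sketch only; names, addresses and port are placeholders.
  ip link add wg0 type wireguard
  ip addr add 192.168.241.1/24 dev wg0
  wg set wg0 private-key <(wg genkey) \
     peer "$(wg genkey | wg pubkey)" allowed-ips 192.168.241.2/32 \
     endpoint 192.168.241.2:51820
  ip link set wg0 up
  # 192.168.241.2 is routed via wg0 itself, so wireguard's own outgoing
  # packets toward the endpoint would be routed straight back into wg0;
  # the ping simply times out rather than the transmit failing with -ELOOP.
  ping -W 1 -c 1 192.168.241.2 || true
  ip link del wg0
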
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, s
 		net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
 				    wg->dev->name, &endpoint->addr, ret);
 		goto err;
-	} else if (unlikely(rt->dst.dev == skb->dev)) {
-		ip_rt_put(rt);
-		ret = -ELOOP;
-		net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
-				    wg->dev->name, &endpoint->addr);
-		goto err;
 	}
 	if (cache)
 		dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, s
 		net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
 				    wg->dev->name, &endpoint->addr, ret);
 		goto err;
-	} else if (unlikely(dst->dev == skb->dev)) {
-		dst_release(dst);
-		ret = -ELOOP;
-		net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
-				    wg->dev->name, &endpoint->addr);
-		goto err;
 	}
 	if (cache)
 		dst_cache_set_ip6(cache, dst, &fl.saddr);
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -48,8 +48,11 @@ cleanup() {
 	exec 2>/dev/null
 	printf "$orig_message_cost" > /proc/sys/net/core/message_cost
 	ip0 link del dev wg0
+	ip0 link del dev wg1
 	ip1 link del dev wg0
+	ip1 link del dev wg1
 	ip2 link del dev wg0
+	ip2 link del dev wg1
 	local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
 	[[ -n $to_kill ]] && kill $to_kill
 	pp ip netns del $netns1
@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2
 key1="$(pp wg genkey)"
 key2="$(pp wg genkey)"
 key3="$(pp wg genkey)"
+key4="$(pp wg genkey)"
 pub1="$(pp wg pubkey <<<"$key1")"
 pub2="$(pp wg pubkey <<<"$key2")"
 pub3="$(pp wg pubkey <<<"$key3")"
+pub4="$(pp wg pubkey <<<"$key4")"
 psk="$(pp wg genpsk)"
 [[ -n $key1 && -n $key2 && -n $psk ]]
 
 configure_peers() {
 	ip1 addr add 192.168.241.1/24 dev wg0
-	ip1 addr add fd00::1/24 dev wg0
+	ip1 addr add fd00::1/112 dev wg0
 
 	ip2 addr add 192.168.241.2/24 dev wg0
-	ip2 addr add fd00::2/24 dev wg0
+	ip2 addr add fd00::2/112 dev wg0
 
 	n1 wg set wg0 \
 		private-key <(echo "$key1") \
@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 private-key <(echo "$key3")
 n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
 n1 ping -W 1 -c 1 192.168.241.2
+n2 wg set wg0 peer "$pub3" remove
 
-ip1 link del wg0
+# Test that we can route wg through wg
+ip1 addr flush dev wg0
+ip2 addr flush dev wg0
+ip1 addr add fd00::5:1/112 dev wg0
+ip2 addr add fd00::5:2/112 dev wg0
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
+ip1 link add wg1 type wireguard
+ip2 link add wg1 type wireguard
+ip1 addr add 192.168.241.1/24 dev wg1
+ip1 addr add fd00::1/112 dev wg1
+ip2 addr add 192.168.241.2/24 dev wg1
+ip2 addr add fd00::2/112 dev wg1
+ip1 link set mtu 1340 up dev wg1
+ip2 link set mtu 1340 up dev wg1
+n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
+n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
+tests
+# Try to set up a routing loop between the two namespaces
+ip1 link set netns $netns0 dev wg1
+ip0 addr add 192.168.241.1/24 dev wg1
+ip0 link set up dev wg1
+n0 ping -W 1 -c 1 192.168.241.2
+n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
 ip2 link del wg0
+ip2 link del wg1
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+
+ip0 link del wg1
+ip1 link del wg0
 
 # Test using NAT. We now change the topology to this:
 # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐
@@ -282,6 +316,20 @@ pp sleep 3
 n2 ping -W 1 -c 1 192.168.241.1
 n1 wg set wg0 peer "$pub2" persistent-keepalive 0
 
+# Test that onion routing works, even when it loops
+n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
+ip1 addr add 192.168.242.1/24 dev wg0
+ip2 link add wg1 type wireguard
+ip2 addr add 192.168.242.2/24 dev wg1
+n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
+ip2 link set wg1 up
+n1 ping -W 1 -c 1 192.168.242.2
+ip2 link del wg1
+n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
+! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
+n1 wg set wg0 peer "$pub3" remove
+ip1 addr del 192.168.242.1/24 dev wg0
+
 # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
 ip1 -6 addr add fc00::9/96 dev vethc
 ip1 -6 route add default via fc00::1
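
For exercising the new cases, netns.sh is a self-contained bash script, so on
a host kernel that already has wireguard and network namespace support it can
presumably be run directly as root:

  sudo bash tools/testing/selftests/wireguard/netns.sh

The qemu harness under tools/testing/selftests/wireguard/qemu is the usual way
to run the same script against a freshly built kernel.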