selftests/bpf: Test narrow loads with off > 0 for bpf_sock_addr
author Andrey Ignatov <rdna@fb.com>
Sun, 11 Nov 2018 06:15:15 +0000 (22:15 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Sun, 11 Nov 2018 06:29:59 +0000 (22:29 -0800)
Add more test cases for the bpf_sock_addr context to test narrow loads with
offset > 0 from the ctx->user_ip4 field (__u32), sketched below in C:
* off=1, size=1;
* off=2, size=1;
* off=3, size=1;
* off=2, size=2.
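
For reference, a hedged C-level sketch of what these four accesses look like
from a BPF program; the selftest emits the equivalent raw BPF_LDX_MEM()
instructions directly (see the diff below), and the program name and compared
values here are placeholders for a hypothetical 192.168.1.254 bound address,
with the halfword value assuming a little-endian host:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/bind4")
int user_ip4_narrow_loads(struct bpf_sock_addr *ctx)
{
	/* Byte- and halfword-sized views of the __u32 user_ip4 field;
	 * volatile keeps clang from merging or dropping the accesses. */
	volatile __u8  *bytes  = (volatile __u8 *)&ctx->user_ip4;
	volatile __u16 *halves = (volatile __u16 *)&ctx->user_ip4;

	if (bytes[1]  != 0xa8)		/* off=1, size=1 */
		return 0;
	if (bytes[2]  != 0x01)		/* off=2, size=1 */
		return 0;
	if (bytes[3]  != 0xfe)		/* off=3, size=1 */
		return 0;
	if (halves[1] != 0xfe01)	/* off=2, size=2 */
		return 0;

	return 1;			/* allow the bind() */
}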

Signed-off-by: Andrey Ignatov <rdna@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/test_sock_addr.c

index aeeb76a54d633e9e443fdecef7160c924efeb806..73b7493d4120991527b61a2a33a9c0784542176b 100644 (file)
@@ -574,24 +574,44 @@ static int bind4_prog_load(const struct sock_addr_test *test)
                /* if (sk.family == AF_INET && */
                BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                            offsetof(struct bpf_sock_addr, family)),
-               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 16),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 24),
 
                /*     (sk.type == SOCK_DGRAM || sk.type == SOCK_STREAM) && */
                BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                            offsetof(struct bpf_sock_addr, type)),
                BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 1),
                BPF_JMP_A(1),
-               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 12),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 20),
 
                /*     1st_byte_of_user_ip4 == expected && */
                BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
                            offsetof(struct bpf_sock_addr, user_ip4)),
-               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 10),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 18),
+
+               /*     2nd_byte_of_user_ip4 == expected && */
+               BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, user_ip4) + 1),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[1], 16),
+
+               /*     3rd_byte_of_user_ip4 == expected && */
+               BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, user_ip4) + 2),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[2], 14),
+
+               /*     4th_byte_of_user_ip4 == expected && */
+               BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, user_ip4) + 3),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[3], 12),
 
                /*     1st_half_of_user_ip4 == expected && */
                BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
                            offsetof(struct bpf_sock_addr, user_ip4)),
-               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 8),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 10),
+
+               /*     2nd_half_of_user_ip4 == expected && */
+               BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
+                           offsetof(struct bpf_sock_addr, user_ip4) + 2),
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[1], 8),
 
                /*     whole_user_ip4 == expected) { */
                BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,