selftests/bpf: add verifier tests for bpf_call
author Alexei Starovoitov <ast@fb.com>
Fri, 15 Dec 2017 01:55:07 +0000 (17:55 -0800)
committer Daniel Borkmann <daniel@iogearbox.net>
Sun, 17 Dec 2017 19:34:35 +0000 (20:34 +0100)
Add an extensive set of tests for the bpf_call verification logic; they all rely on the pseudo-call encoding sketched after the list:

calls: basic sanity
calls: using r0 returned by callee
calls: callee is using r1
calls: callee using args1
calls: callee using wrong args2
calls: callee using two args
calls: callee changing pkt pointers
calls: two calls with args
calls: two calls with bad jump
calls: recursive call. test1
calls: recursive call. test2
calls: unreachable code
calls: invalid call
calls: jumping across function bodies. test1
calls: jumping across function bodies. test2
calls: call without exit
calls: call into middle of ld_imm64
calls: call into middle of other call
calls: two calls with bad fallthrough
calls: two calls with stack read
calls: two calls with stack write
calls: spill into caller stack frame
calls: two calls with stack write and void return
calls: ambiguous return value
calls: two calls that return map_value
calls: two calls that return map_value with bool condition
calls: two calls that return map_value with incorrect bool check
calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1
calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2
calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3
calls: two calls that receive map_value_ptr_or_null via arg. test1
calls: two calls that receive map_value_ptr_or_null via arg. test2
calls: pkt_ptr spill into caller stack
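
All of these tests exercise bpf-to-bpf (pseudo) calls, encoded as
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, off): src_reg == 1 marks the
instruction as a call into another function of the same program rather
than a helper call, and imm holds the callee's offset relative to the
next instruction. A minimal sketch of that encoding (a hypothetical
wrapper macro, not part of this patch):

	/* Illustrative only: emit a program-local (pseudo) call whose
	 * target lies TGT instructions past the next instruction.
	 * src_reg = 1 distinguishes it from a helper call (src_reg = 0).
	 */
	#define BPF_PSEUDO_CALL_INSN(TGT)			\
		((struct bpf_insn) {				\
			.code    = BPF_JMP | BPF_CALL,		\
			.dst_reg = 0,				\
			.src_reg = 1, /* pseudo call marker */	\
			.off     = 0,				\
			.imm     = TGT })

For example, BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2) in the
"basic sanity" test skips the next two instructions and lands on the
first instruction of the callee.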

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
tools/testing/selftests/bpf/test_verifier.c

index 3c64f30cf63cc2b6adb532a3b1f3201533193f7f..88f389c6ec4847a746bf21929899665c8762abb6 100644
@@ -2,6 +2,7 @@
  * Testsuite for eBPF verifier
  *
  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2017 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -277,7 +278,7 @@ static struct bpf_test tests[] = {
                .insns = {
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
                },
-               .errstr = "jump out of range",
+               .errstr = "not an exit",
                .result = REJECT,
        },
        {
@@ -8097,6 +8098,964 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
        },
+       {
+               "calls: basic sanity",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .result = ACCEPT,
+       },
+       {
+               "calls: using r0 returned by callee",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .result = ACCEPT,
+       },
+       {
+               "calls: callee is using r1",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+               .result = ACCEPT,
+       },
+       {
+               "calls: callee using args1",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "calls: callee using wrong args2",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "R2 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "calls: callee using two args",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "calls: callee changing pkt pointers",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       /* clear_all_pkt_pointers() has to walk all frames
+                        * to make sure that pkt pointers in the caller
+                        * are cleared when callee is calling a helper that
+                        * adjusts packet size
+                        */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_xdp_adjust_head),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "R6 invalid mem access 'inv'",
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
+       {
+               "calls: two calls with args",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
+       {
+               "calls: two calls with bad jump",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "jump out of range from insn 11 to 9",
+               .result = REJECT,
+       },
+       {
+               "calls: recursive call. test1",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "back-edge",
+               .result = REJECT,
+       },
+       {
+               "calls: recursive call. test2",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "back-edge",
+               .result = REJECT,
+       },
+       {
+               "calls: unreachable code",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "unreachable insn 6",
+               .result = REJECT,
+       },
+       {
+               "calls: invalid call",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "invalid destination",
+               .result = REJECT,
+       },
+       {
+               "calls: jumping across function bodies. test1",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "jump out of range",
+               .result = REJECT,
+       },
+       {
+               "calls: jumping across function bodies. test2",
+               .insns = {
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "jump out of range",
+               .result = REJECT,
+       },
+       {
+               "calls: call without exit",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "not an exit",
+               .result = REJECT,
+       },
+       {
+               "calls: call into middle of ld_imm64",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_LD_IMM64(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "last insn",
+               .result = REJECT,
+       },
+       {
+               "calls: call into middle of other call",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "last insn",
+               .result = REJECT,
+       },
+       {
+               "calls: two calls with bad fallthrough",
+               .insns = {
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+               .errstr = "not an exit",
+               .result = REJECT,
+       },
+       {
+               "calls: two calls with stack read",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .result = ACCEPT,
+       },
+       {
+               "calls: two calls with stack write",
+               .insns = {
+                       /* main prog */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+                       /* write into stack frame of main prog */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       /* read from stack frame of main prog */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .result = ACCEPT,
+       },
+       {
+               "calls: spill into caller stack frame",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .errstr = "cannot spill",
+               .result = REJECT,
+       },
+       {
+               "calls: two calls with stack write and void return",
+               .insns = {
+                       /* main prog */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       /* write into stack frame of main prog */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+                       BPF_EXIT_INSN(), /* void return */
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .result = ACCEPT,
+       },
+       {
+               "calls: ambiguous return value",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "allowed for root only",
+               .result_unpriv = REJECT,
+               .errstr = "R0 !read_ok",
+               .result = REJECT,
+       },
+       {
+               "calls: two calls that return map_value",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       /* fetch second map_value_ptr from the stack */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       /* call 3rd function twice */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* first time with fp-8 */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       /* second time with fp-16 */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       /* lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       /* write map_value_ptr into stack frame of main prog */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(), /* return 0 */
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .fixup_map1 = { 23 },
+               .result = ACCEPT,
+       },
+       {
+               "calls: two calls that return map_value with bool condition",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       /* call 3rd function twice */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* first time with fp-8 */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       /* second time with fp-16 */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+                       /* fetch second map_value_ptr from the stack */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       /* lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(), /* return 0 */
+                       /* write map_value_ptr into stack frame of main prog */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(), /* return 1 */
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .fixup_map1 = { 23 },
+               .result = ACCEPT,
+       },
+       {
+               "calls: two calls that return map_value with incorrect bool check",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       /* call 3rd function twice */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* first time with fp-8 */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       /* second time with fp-16 */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       /* fetch second map_value_ptr from the stack */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       /* lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(), /* return 0 */
+                       /* write map_value_ptr into stack frame of main prog */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(), /* return 1 */
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .fixup_map1 = { 23 },
+               .result = REJECT,
+               .errstr = "invalid read from stack off -16+0 size 8",
+       },
+       {
+               "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* 1st lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       /* write map_value_ptr into stack frame of main prog at fp-8 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+
+                       /* 2nd lookup from map */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_9, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       /* write map_value_ptr into stack frame of main prog at fp-16 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_9, 1),
+
+                       /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       /* if arg2 == 1 do *arg1 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+                       /* if arg4 == 1 do *arg3 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .fixup_map1 = { 12, 22 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+       },
+       {
+               "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* 1st lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       /* write map_value_ptr into stack frame of main prog at fp-8 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+
+                       /* 2nd lookup from map */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_9, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       /* write map_value_ptr into stack frame of main prog at fp-16 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_9, 1),
+
+                       /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       /* if arg2 == 1 do *arg1 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+                       /* if arg4 == 1 do *arg3 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .fixup_map1 = { 12, 22 },
+               .result = ACCEPT,
+       },
+       {
+               "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* 1st lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       /* write map_value_ptr into stack frame of main prog at fp-8 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+
+                       /* 2nd lookup from map */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+                       /* write map_value_ptr into stack frame of main prog at fp-16 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_9, 1),
+
+                       /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+                       /* subprog 2 */
+                       /* if arg2 == 1 do *arg1 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+                       /* if arg4 == 1 do *arg3 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .fixup_map1 = { 12, 22 },
+               .result = REJECT,
+               .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+       },
+       {
+               "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* 1st lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+
+                       /* 2nd lookup from map */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_9, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_9, 1),
+
+                       /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       /* if arg2 == 1 do *arg1 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+                       /* if arg4 == 1 do *arg3 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .fixup_map1 = { 12, 22 },
+               .result = ACCEPT,
+       },
+       {
+               "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+               .insns = {
+                       /* main prog */
+                       /* pass fp-16, fp-8 into a function */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       /* 1st lookup from map */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+
+                       /* 2nd lookup from map */
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_9, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_9, 1),
+
+                       /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 2 */
+                       /* if arg2 == 1 do *arg1 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+                       /* if arg4 == 0 do *arg3 = 0 */
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+                       /* fetch map_value_ptr from the stack of this function */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+                       /* write into map value */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .fixup_map1 = { 12, 22 },
+               .result = REJECT,
+               .errstr = "R0 invalid mem access 'inv'",
+       },
+       {
+               "calls: pkt_ptr spill into caller stack",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+
+                       /* subprog 1 */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       /* spill unchecked pkt_ptr into stack of caller */
+                       BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+                       /* now the pkt range is verified, read pkt_ptr from stack */
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+                       /* write 4 bytes into packet */
+                       BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)