unsigned int i;
for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
- if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
+ if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
word[i] = (__force u32)cpu_to_be32(word[i]);
}
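
The swap above is keyed off a per-word use type that the verifier fills in
while checking the program; only words classified as atomic counters are kept
in device (big) endian and need swapping on lookup/update. A minimal sketch of
the classification these hunks rely on, assuming the enum as it appears in the
driver's main.h:

    enum nfp_bpf_map_use {
        NFP_MAP_UNUSED = 0,
        NFP_MAP_USE_READ,
        NFP_MAP_USE_WRITE,
        NFP_MAP_USE_ATOMIC_CNT,
    };
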
+/* Mark the value as unsafely initialized in case it becomes atomic later
+ * and we didn't byte swap something that is not byte-swap neutral.
+ */
+static void
+nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
+{
+ u32 *word = value;
+ unsigned int i;
+
+ for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
+ if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
+ word[i] != (__force u32)cpu_to_be32(word[i]))
+ nfp_map->use_map[i].non_zero_update = 1;
+}
+
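The word[i] != cpu_to_be32(word[i]) test flags exactly the values that are not
byte-swap neutral: on a little-endian host a 32-bit word survives the swap
only if byte 0 equals byte 3 and byte 1 equals byte 2. A standalone sketch of
that predicate (illustrative only, plain C in place of the kernel helpers):

    #include <stdint.h>

    /* Byte-reverse a 32-bit word, the userspace equivalent of what
     * cpu_to_be32() does on a little-endian host.
     */
    static uint32_t swap32(uint32_t w)
    {
        return (w >> 24) | ((w >> 8) & 0x0000ff00) |
               ((w << 8) & 0x00ff0000) | (w << 24);
    }

    /* 0x00000000 and 0xabcdcdab are swap neutral and stay unflagged;
     * 0x00000001 becomes 0x01000000 and would set non_zero_update.
     */
    static int needs_record(uint32_t w)
    {
        return w != swap32(w);
    }
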
static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
nfp_map_bpf_byte_swap(offmap->dev_priv, value);
+ nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}
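
The consequence for user space is that atomic counter words must be seeded as
zero, which is byte-swap neutral and therefore safe no matter how the word is
later classified. A hedged sketch using the libbpf syscall wrapper (map_fd,
the key, and the one-counter value layout are hypothetical):

    #include <bpf/bpf.h>

    /* Zero-initialize a map value before the offloaded program starts
     * doing atomic adds on it.
     */
    static int init_counter(int map_fd, __u32 key)
    {
        __u64 value = 0;    /* zero survives any byte swap unchanged */

        return bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
    }
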
nfp_prog->adjust_head_location = location;
}
+static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
+ const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
+ struct bpf_offloaded_map *offmap;
+ struct bpf_func_state *state;
+ struct nfp_bpf_map *nfp_map;
+ int off, i;
+
+ state = env->cur_state->frame[reg3->frameno];
+
+ /* We need to record each time an update happens with non-zero words,
+ * in case such a word is later used in atomic operations.
+ * Implicitly depends on nfp_bpf_stack_arg_ok(reg3) being run before.
+ */
+
+ offmap = map_to_offmap(reg1->map_ptr);
+ nfp_map = offmap->dev_priv;
+ off = reg3->off + reg3->var_off.value;
+
+ for (i = 0; i < offmap->map.value_size; i++) {
+ struct bpf_stack_state *stack_entry;
+ unsigned int soff;
+
+ soff = -(off + i) - 1;
+ stack_entry = &state->stack[soff / BPF_REG_SIZE];
+ if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
+ continue;
+
+ if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
+ pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
+ i, soff);
+ return false;
+ }
+ nfp_map->use_map[i / 4].non_zero_update = 1;
+ }
+
+ return true;
+}
+
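A worked example of the stack indexing above, assuming BPF_REG_SIZE == 8 and a
value argument living at fp-8 (reg3->off + reg3->var_off.value == -8,
value_size == 8):

    /* i = 0 (first value byte): soff = -(-8 + 0) - 1 = 7 -> stack[0].slot_type[7]
     * i = 7 (last value byte):  soff = -(-8 + 7) - 1 = 0 -> stack[0].slot_type[0]
     *
     * Every byte of the candidate value is checked against the verifier's
     * per-byte stack state; a byte not proven STACK_ZERO either fails the
     * update (if it overlaps a known atomic counter) or marks the word
     * with non_zero_update for nfp_bpf_map_mark_used_one() to judge later.
     */
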
static int
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
bpf->helpers.map_update, reg1) ||
!nfp_bpf_stack_arg_ok("map_update", env, reg2,
meta->func_id ? &meta->arg2 : NULL) ||
- !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
+ !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
+ !nfp_bpf_map_update_value_ok(env))
return -EOPNOTSUPP;
break;
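
From the program side, the new check accepts the usual initialization pattern,
where the verifier can prove the stack bytes passed as the value are zero. A
sketch in BPF C (the "counters" map and its SEC() plumbing are hypothetical
and elided for brevity):

    __u32 key = 0;
    __u64 zero = 0;     /* constant store, tracked as STACK_ZERO */

    /* Accepted: all value bytes are provably zero. */
    bpf_map_update_elem(&counters, &key, &zero, BPF_ANY);

Had zero instead been copied from packet data or left partially uninitialized,
the bytes would not be STACK_ZERO and the update would be rejected for any
word already classified as an atomic counter.
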
struct nfp_bpf_map *nfp_map,
unsigned int off, enum nfp_bpf_map_use use)
{
- if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
- nfp_map->use_map[off / 4] != use) {
+ if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
+ nfp_map->use_map[off / 4].type != use) {
pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
- nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
+ nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
nfp_bpf_map_use_name(use), off);
return -EOPNOTSUPP;
}
- nfp_map->use_map[off / 4] = use;
+ if (nfp_map->use_map[off / 4].non_zero_update &&
+ use == NFP_MAP_USE_ATOMIC_CNT) {
+ pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
+ off);
+ return -EOPNOTSUPP;
+ }
+
+ nfp_map->use_map[off / 4].type = use;
return 0;
}
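
The .type and .non_zero_update accesses throughout this patch assume the
use_map entries grew from a bare enum nfp_bpf_map_use into a small per-word
record; a sketch of the matching nfp_bpf_map change in the driver's main.h
(bit-field widths assumed):

    struct {
        enum nfp_bpf_map_use type :4;
        unsigned int non_zero_update :1;
    } use_map[];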