bpf: Add generic support for update and delete batch ops
Author:     Brian Vazquez <brianvv@google.com>
AuthorDate: Wed, 15 Jan 2020 18:43:02 +0000 (10:43 -0800)
Commit:     Alexei Starovoitov <ast@kernel.org>
CommitDate: Wed, 15 Jan 2020 22:00:35 +0000 (14:00 -0800)
This commit adds generic support for the update and delete batch ops
that can be used by almost all bpf maps. These commands share the same
UAPI attr that the lookup and lookup_and_delete batch ops use, and the
new syscall commands are:

  BPF_MAP_UPDATE_BATCH
  BPF_MAP_DELETE_BATCH

The main difference between the update/delete and lookup batch ops is
that for update/delete the keys/values must be specified by userspace,
and because of that neither in_batch nor out_batch is used.
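For reference, here is a minimal userspace sketch that drives the new
update command through the raw bpf(2) syscall. The wrapper and buffer
names are illustrative only (not part of this patch), and it assumes a
linux/bpf.h that carries this series' UAPI additions:

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/bpf.h>

  static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
  {
          return syscall(__NR_bpf, cmd, attr, size);
  }

  /* Update *count elements of map_fd in one syscall; on return the
   * kernel has written back into *count how many were processed.
   */
  static int map_update_batch(int map_fd, const void *keys,
                              const void *values, __u32 *count)
  {
          union bpf_attr attr;
          int err;

          memset(&attr, 0, sizeof(attr));
          attr.batch.map_fd = map_fd;
          attr.batch.keys = (__u64)(unsigned long)keys;
          attr.batch.values = (__u64)(unsigned long)values;
          attr.batch.count = *count;

          err = sys_bpf(BPF_MAP_UPDATE_BATCH, &attr, sizeof(attr));
          *count = attr.batch.count;
          return err;
  }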

Suggested-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Brian Vazquez <brianvv@google.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200115184308.162644-4-brianvv@google.com
include/linux/bpf.h
include/uapi/linux/bpf.h
kernel/bpf/syscall.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 807744ecaa5a1a75542a32ffcde00bde971132eb..05466ad6cf1c5b5a101e5e3930ecf2f6318d5b2a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -46,6 +46,10 @@ struct bpf_map_ops {
        void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
        int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
                                union bpf_attr __user *uattr);
+       int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+                               union bpf_attr __user *uattr);
+       int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+                               union bpf_attr __user *uattr);
 
        /* funcs callable from userspace and from eBPF programs */
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -987,6 +991,12 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 int  generic_map_lookup_batch(struct bpf_map *map,
                              const union bpf_attr *attr,
                              union bpf_attr __user *uattr);
+int  generic_map_update_batch(struct bpf_map *map,
+                             const union bpf_attr *attr,
+                             union bpf_attr __user *uattr);
+int  generic_map_delete_batch(struct bpf_map *map,
+                             const union bpf_attr *attr,
+                             union bpf_attr __user *uattr);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
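A map type opts in by pointing the new ops at the generic helpers. As a
sketch of the wiring (follow-up patches in this series do this for the
hash map types; the surrounding ops are elided here):

  const struct bpf_map_ops htab_map_ops = {
          ...
          .map_delete_batch = generic_map_delete_batch,
          .map_update_batch = generic_map_update_batch,
          ...
  };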
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0721b2c6bb8c554e5feebcb1b3d7e464b3799eef..d5320b0be459af31982d9d11af616f1a7d50eb3f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -108,6 +108,8 @@ enum bpf_cmd {
        BPF_MAP_FREEZE,
        BPF_BTF_GET_NEXT_ID,
        BPF_MAP_LOOKUP_BATCH,
+       BPF_MAP_UPDATE_BATCH,
+       BPF_MAP_DELETE_BATCH,
 };
 
 enum bpf_map_type {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index d604ddbb1afbe18af2c3863f2283520c789b0723..ce8244d1ba99c03d59646d693be33870d9cb577d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1218,6 +1218,111 @@ err_put:
        return err;
 }
 
+int generic_map_delete_batch(struct bpf_map *map,
+                            const union bpf_attr *attr,
+                            union bpf_attr __user *uattr)
+{
+       void __user *keys = u64_to_user_ptr(attr->batch.keys);
+       u32 cp, max_count;
+       int err = 0;
+       void *key;
+
+       if (attr->batch.elem_flags & ~BPF_F_LOCK)
+               return -EINVAL;
+
+       if ((attr->batch.elem_flags & BPF_F_LOCK) &&
+           !map_value_has_spin_lock(map)) {
+               return -EINVAL;
+       }
+
+       max_count = attr->batch.count;
+       if (!max_count)
+               return 0;
+
+       for (cp = 0; cp < max_count; cp++) {
+               key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
+               if (IS_ERR(key)) {
+                       err = PTR_ERR(key);
+                       break;
+               }
+
+               if (bpf_map_is_dev_bound(map)) {
+                       err = bpf_map_offload_delete_elem(map, key);
+                       break;
+               }
+
+               preempt_disable();
+               __this_cpu_inc(bpf_prog_active);
+               rcu_read_lock();
+               err = map->ops->map_delete_elem(map, key);
+               rcu_read_unlock();
+               __this_cpu_dec(bpf_prog_active);
+               preempt_enable();
+               maybe_wait_bpf_programs(map);
+               if (err)
+                       break;
+       }
+       if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
+               err = -EFAULT;
+       return err;
+}
+
+int generic_map_update_batch(struct bpf_map *map,
+                            const union bpf_attr *attr,
+                            union bpf_attr __user *uattr)
+{
+       void __user *values = u64_to_user_ptr(attr->batch.values);
+       void __user *keys = u64_to_user_ptr(attr->batch.keys);
+       u32 value_size, cp, max_count;
+       int ufd = attr->map_fd;
+       void *key, *value;
+       struct fd f;
+       int err = 0;
+
+       f = fdget(ufd);
+       if (attr->batch.elem_flags & ~BPF_F_LOCK)
+               return -EINVAL;
+
+       if ((attr->batch.elem_flags & BPF_F_LOCK) &&
+           !map_value_has_spin_lock(map)) {
+               return -EINVAL;
+       }
+
+       value_size = bpf_map_value_size(map);
+
+       max_count = attr->batch.count;
+       if (!max_count)
+               return 0;
+
+       value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
+       if (!value)
+               return -ENOMEM;
+
+       for (cp = 0; cp < max_count; cp++) {
+               key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
+               if (IS_ERR(key)) {
+                       err = PTR_ERR(key);
+                       break;
+               }
+               err = -EFAULT;
+               if (copy_from_user(value, values + cp * value_size, value_size))
+                       break;
+
+               err = bpf_map_update_value(map, f, key, value,
+                                          attr->batch.elem_flags);
+
+               if (err)
+                       break;
+       }
+
+       if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
+               err = -EFAULT;
+
+       kfree(value);
+       kfree(key);
+       return err;
+}
+
 #define MAP_LOOKUP_RETRIES 3
 
 int generic_map_lookup_batch(struct bpf_map *map,
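Both helpers report partial progress: even on failure, the number of
elements already processed is copied back to uattr->batch.count, so a
caller can resume where the batch stopped. A hypothetical resume loop
on top of the map_update_batch() wrapper sketched in the commit message
(total, key_size, value_size and the buffers are illustrative and
assumed to be filled in by the caller):

  char *keys, *vals;   /* total * key_size / total * value_size bytes */
  __u32 done = 0, n;
  int err = 0;

  while (done < total) {
          n = total - done;
          err = map_update_batch(map_fd,
                                 keys + done * key_size,
                                 vals + done * value_size, &n);
          done += n;  /* kernel wrote back the elements processed */
          if (err)
                  break;  /* or inspect errno and retry */
  }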
@@ -3219,6 +3324,10 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
 
        if (cmd == BPF_MAP_LOOKUP_BATCH)
                BPF_DO_BATCH(map->ops->map_lookup_batch);
+       else if (cmd == BPF_MAP_UPDATE_BATCH)
+               BPF_DO_BATCH(map->ops->map_update_batch);
+       else
+               BPF_DO_BATCH(map->ops->map_delete_batch);
 
 err_put:
        fdput(f);
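The delete path dispatched above looks the same from userspace, minus
the values pointer; a short sketch reusing the sys_bpf() wrapper from
the commit message (map_fd, keys and nr_keys are illustrative):

  union bpf_attr attr;
  int err;

  memset(&attr, 0, sizeof(attr));
  attr.batch.map_fd = map_fd;
  attr.batch.keys = (__u64)(unsigned long)keys;
  attr.batch.count = nr_keys;

  err = sys_bpf(BPF_MAP_DELETE_BATCH, &attr, sizeof(attr));
  /* attr.batch.count now holds the number of keys deleted */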
@@ -3325,6 +3434,12 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_MAP_LOOKUP_BATCH:
                err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
                break;
+       case BPF_MAP_UPDATE_BATCH:
+               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
+               break;
+       case BPF_MAP_DELETE_BATCH:
+               err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
+               break;
        default:
                err = -EINVAL;
                break;