bpf: hash: move select_bucket() out of htab's spinlock
author: tom.leiming@gmail.com <tom.leiming@gmail.com>
Tue, 29 Dec 2015 14:40:26 +0000 (22:40 +0800)
committer: David S. Miller <davem@davemloft.net>
Tue, 29 Dec 2015 20:13:44 +0000 (15:13 -0500)
The spinlock is just used for protecting the per-bucket
hlist, so it isn't needed for selecting bucket.

Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
kernel/bpf/hashtab.c

index 2615388009a47783bcc205de50550bb8815ed102..d857fcb3607b6b958d9b7c9f71a148ebadd1425d 100644 (file)
@@ -248,12 +248,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
        memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
 
        l_new->hash = htab_map_hash(l_new->key, key_size);
+       head = select_bucket(htab, l_new->hash);
 
        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&htab->lock, flags);
 
-       head = select_bucket(htab, l_new->hash);
-
        l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
 
        if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
@@ -310,11 +309,10 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
        key_size = map->key_size;
 
        hash = htab_map_hash(key, key_size);
+       head = select_bucket(htab, hash);
 
        raw_spin_lock_irqsave(&htab->lock, flags);
 
-       head = select_bucket(htab, hash);
-
        l = lookup_elem_raw(head, hash, key, key_size);
 
        if (l) {