s390/mm: use refcount_t for refcount
author    Chuhong Yuan <hslester96@gmail.com>
          Thu, 8 Aug 2019 07:18:26 +0000 (15:18 +0800)
committer Vasily Gorbik <gor@linux.ibm.com>
          Wed, 21 Aug 2019 10:41:43 +0000 (12:41 +0200)
refcount_t is preferred over atomic_t for reference counters, since the
refcount_t implementation can protect against overflows and detect
possible use-after-free.

Convert the gmap ref_count from atomic_t to refcount_t accordingly.
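
For illustration only, a minimal sketch of the pattern this conversion
follows (struct foo and its helpers are hypothetical and not part of
this patch): refcount_inc() saturates and warns instead of wrapping,
and refcount_dec_and_test() replaces the open-coded
atomic_dec_return() == 0 check.

  #include <linux/refcount.h>
  #include <linux/slab.h>

  /* Hypothetical example object; not part of this patch. */
  struct foo {
          refcount_t ref_count;
  };

  static struct foo *foo_alloc(void)
  {
          struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

          if (f)
                  refcount_set(&f->ref_count, 1); /* initial reference */
          return f;
  }

  static struct foo *foo_get(struct foo *f)
  {
          refcount_inc(&f->ref_count); /* saturates and warns on overflow */
          return f;
  }

  static void foo_put(struct foo *f)
  {
          /* true only when the last reference is dropped */
          if (refcount_dec_and_test(&f->ref_count))
                  kfree(f);
  }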

Link: http://lkml.kernel.org/r/20190808071826.6649-1-hslester96@gmail.com
Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/gmap.h
arch/s390/mm/gmap.c

index fcbd638fb9f4c353523c7a3f1f7dd1b56ecfd0fe..37f96b6f0e611a21785adab66b01005d91b3f043 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -9,6 +9,8 @@
 #ifndef _ASM_S390_GMAP_H
 #define _ASM_S390_GMAP_H
 
+#include <linux/refcount.h>
+
 /* Generic bits for GMAP notification on DAT table entry changes. */
 #define GMAP_NOTIFY_SHADOW     0x2
 #define GMAP_NOTIFY_MPROT      0x1
@@ -46,7 +48,7 @@ struct gmap {
        struct radix_tree_root guest_to_host;
        struct radix_tree_root host_to_guest;
        spinlock_t guest_table_lock;
-       atomic_t ref_count;
+       refcount_t ref_count;
        unsigned long *table;
        unsigned long asce;
        unsigned long asce_end;
index 1e668b95e0c664352dac7a8ea8c4c4d189d07996..3770f12d92526fd976b5f1994e2d0d4cf05c6128 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -67,7 +67,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
        INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
        spin_lock_init(&gmap->guest_table_lock);
        spin_lock_init(&gmap->shadow_lock);
-       atomic_set(&gmap->ref_count, 1);
+       refcount_set(&gmap->ref_count, 1);
        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
        if (!page)
                goto out_free;
@@ -214,7 +214,7 @@ static void gmap_free(struct gmap *gmap)
  */
 struct gmap *gmap_get(struct gmap *gmap)
 {
-       atomic_inc(&gmap->ref_count);
+       refcount_inc(&gmap->ref_count);
        return gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_get);
@@ -227,7 +227,7 @@ EXPORT_SYMBOL_GPL(gmap_get);
  */
 void gmap_put(struct gmap *gmap)
 {
-       if (atomic_dec_return(&gmap->ref_count) == 0)
+       if (refcount_dec_and_test(&gmap->ref_count))
                gmap_free(gmap);
 }
 EXPORT_SYMBOL_GPL(gmap_put);
@@ -1594,7 +1594,7 @@ static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
                        continue;
                if (!sg->initialized)
                        return ERR_PTR(-EAGAIN);
-               atomic_inc(&sg->ref_count);
+               refcount_inc(&sg->ref_count);
                return sg;
        }
        return NULL;
@@ -1682,7 +1682,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                        }
                }
        }
-       atomic_set(&new->ref_count, 2);
+       refcount_set(&new->ref_count, 2);
        list_add(&new->list, &parent->children);
        if (asce & _ASCE_REAL_SPACE) {
                /* nothing to protect, return right away */