s390/mm: use a single lock for the fields in mm_context_t
author Martin Schwidefsky <schwidefsky@de.ibm.com>
	Thu, 17 Aug 2017 16:17:49 +0000 (18:17 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
	Wed, 6 Sep 2017 07:24:43 +0000 (09:24 +0200)
The three locks 'lock', 'pgtable_lock' and 'gmap_lock' in mm_context_t
can be reduced to a single lock; the remaining 'lock' now also protects
the pgtable_list and the gmap_list.
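For reference, the consolidated structure then looks roughly as follows.
This is a sketch reconstructed from the hunks below; the position of
'lock' and any fields outside the visible diff context are assumptions:

	typedef struct {
		spinlock_t lock;	/* single lock, replaces pgtable_lock and gmap_lock */
		cpumask_t cpu_attach_mask;
		atomic_t flush_count;
		unsigned int flush_mm;
		struct list_head pgtable_list;	/* protected by 'lock' */
		struct list_head gmap_list;	/* writers take 'lock', readers use RCU */
		unsigned long gmap_asce;
		unsigned long asce;
		/* further fields not visible in the diff context */
	} mm_context_t;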

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/mm/gmap.c
arch/s390/mm/pgalloc.c

diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 3525fe6e7e4c55f01c20ca541a0400847313bd80..3f46a6577b8d70d891ee032777bbaaabb6ec8229 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -9,9 +9,7 @@ typedef struct {
        cpumask_t cpu_attach_mask;
        atomic_t flush_count;
        unsigned int flush_mm;
-       spinlock_t pgtable_lock;
        struct list_head pgtable_list;
-       spinlock_t gmap_lock;
        struct list_head gmap_list;
        unsigned long gmap_asce;
        unsigned long asce;
@@ -29,10 +27,7 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)                                             \
        .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),           \
-       .context.pgtable_lock =                                            \
-                       __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),   \
        .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
-       .context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
        .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 static inline int tprot(unsigned long addr)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 484efe8f42341b6375c54e5e271d7890d34ad83f..3c9abedc323cbb488f46a7ae1d1bc40e4c7de9e1 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -18,9 +18,7 @@ static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
        spin_lock_init(&mm->context.lock);
-       spin_lock_init(&mm->context.pgtable_lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
-       spin_lock_init(&mm->context.gmap_lock);
        INIT_LIST_HEAD(&mm->context.gmap_list);
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.flush_count, 0);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 9e1494e3d84903332eafd9e698dc625dd51d8352..2f66290c9b9273b2bf0295742bb5e56c0d51f1d6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -100,14 +100,14 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
        if (!gmap)
                return NULL;
        gmap->mm = mm;
-       spin_lock(&mm->context.gmap_lock);
+       spin_lock(&mm->context.lock);
        list_add_rcu(&gmap->list, &mm->context.gmap_list);
        if (list_is_singular(&mm->context.gmap_list))
                gmap_asce = gmap->asce;
        else
                gmap_asce = -1UL;
        WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
-       spin_unlock(&mm->context.gmap_lock);
+       spin_unlock(&mm->context.lock);
        return gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_create);
@@ -248,7 +248,7 @@ void gmap_remove(struct gmap *gmap)
                spin_unlock(&gmap->shadow_lock);
        }
        /* Remove gmap from the pre-mm list */
-       spin_lock(&gmap->mm->context.gmap_lock);
+       spin_lock(&gmap->mm->context.lock);
        list_del_rcu(&gmap->list);
        if (list_empty(&gmap->mm->context.gmap_list))
                gmap_asce = 0;
@@ -258,7 +258,7 @@ void gmap_remove(struct gmap *gmap)
        else
                gmap_asce = -1UL;
        WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
-       spin_unlock(&gmap->mm->context.gmap_lock);
+       spin_unlock(&gmap->mm->context.lock);
        synchronize_rcu();
        /* Put reference */
        gmap_put(gmap);
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 05b5b1b0a8d99f62b10439f7744a95e085a80956..05f1f27e6708f31642b9297013109702127c73b2 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -188,7 +188,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
        /* Try to get a fragment of a 4K page as a 2K page table */
        if (!mm_alloc_pgste(mm)) {
                table = NULL;
-               spin_lock_bh(&mm->context.pgtable_lock);
+               spin_lock_bh(&mm->context.lock);
                if (!list_empty(&mm->context.pgtable_list)) {
                        page = list_first_entry(&mm->context.pgtable_list,
                                                struct page, lru);
@@ -203,7 +203,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                                list_del(&page->lru);
                        }
                }
-               spin_unlock_bh(&mm->context.pgtable_lock);
+               spin_unlock_bh(&mm->context.lock);
                if (table)
                        return table;
        }
@@ -227,9 +227,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                /* Return the first 2K fragment of the page */
                atomic_set(&page->_mapcount, 1);
                clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-               spin_lock_bh(&mm->context.pgtable_lock);
+               spin_lock_bh(&mm->context.lock);
                list_add(&page->lru, &mm->context.pgtable_list);
-               spin_unlock_bh(&mm->context.pgtable_lock);
+               spin_unlock_bh(&mm->context.lock);
        }
        return table;
 }
@@ -243,13 +243,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
        if (!mm_alloc_pgste(mm)) {
                /* Free 2K page table fragment of a 4K page */
                bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-               spin_lock_bh(&mm->context.pgtable_lock);
+               spin_lock_bh(&mm->context.lock);
                mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
                if (mask & 3)
                        list_add(&page->lru, &mm->context.pgtable_list);
                else
                        list_del(&page->lru);
-               spin_unlock_bh(&mm->context.pgtable_lock);
+               spin_unlock_bh(&mm->context.lock);
                if (mask != 0)
                        return;
        }
@@ -275,13 +275,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
                return;
        }
        bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-       spin_lock_bh(&mm->context.pgtable_lock);
+       spin_lock_bh(&mm->context.lock);
        mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
        if (mask & 3)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        else
                list_del(&page->lru);
-       spin_unlock_bh(&mm->context.pgtable_lock);
+       spin_unlock_bh(&mm->context.lock);
        table = (unsigned long *) (__pa(table) | (1U << bit));
        tlb_remove_table(tlb, table);
 }