unsigned long asce_bits;
unsigned long asce_limit;
int noexec;
- int pgstes;
+ int has_pgste; /* The mmu context has extended page tables */
+ int alloc_pgste; /* Cloned contexts will have extended page tables */
} mm_context_t;
#endif
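The two flags deliberately have different lifetimes: has_pgste describes the page tables this mm already owns, while alloc_pgste only influences contexts cloned from it. A minimal sketch of hypothetical accessors (not part of this patch, purely illustrative) that spell out those semantics:

/* Hypothetical helpers, for illustration only -- not in this patch. */
static inline int mm_has_pgste(struct mm_struct *mm)
{
	/* this mm's page tables carry pgste extensions */
	return mm->context.has_pgste;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
	/* contexts cloned from this mm will get pgste extensions */
	return mm->context.alloc_pgste;
}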
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- if (current->mm->context.pgstes) {
+ if (current->mm->context.alloc_pgste) {
+ /*
+ * alloc_pgste indicates that any NEW context will be created
+ * with extended page tables. The old context is unchanged. The
+ * page table allocation and the page table operations look at
+ * has_pgste to distinguish between normal and extended page
+ * tables. The only way to create extended page tables is to
+ * set alloc_pgste and then create a new context (e.g. via dup_mm).
+ * Page table allocation runs after init_new_context, and if
+ * has_pgste is set there, it creates extended page tables.
+ */
mm->context.noexec = 0;
- mm->context.pgstes = 1;
+ mm->context.has_pgste = 1;
+ mm->context.alloc_pgste = 1;
} else {
mm->context.noexec = s390_noexec;
- mm->context.pgstes = 0;
+ mm->context.has_pgste = 0;
+ mm->context.alloc_pgste = 0;
}
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
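The comment above describes the only way to obtain a pgste-backed mm; concretely, the expected call sequence is the one s390_enable_sie() uses further down. A condensed sketch, assuming the usual dup_mm() -> init_new_context() chain (the locked version appears in the s390_enable_sie() hunk below):

	tsk->mm->context.alloc_pgste = 1;	/* request pgstes for clones */
	mm = dup_mm(tsk);			/* init_new_context() on the copy
						 * sees alloc_pgste set and sets
						 * has_pgste on the new context */
	tsk->mm->context.alloc_pgste = 0;	/* the old mm stays unchanged */
	/* page_table_alloc() for the new mm now creates extended tables */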
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- if (mm->context.pgstes)
+ if (mm->context.has_pgste)
ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
struct page *page;
unsigned int skey;
- if (!mm->context.pgstes)
+ if (!mm->context.has_pgste)
return -EINVAL;
rcp_lock(ptep);
pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
int young;
unsigned long *pgste;
- if (!vma->vm_mm->context.pgstes)
+ if (!vma->vm_mm->context.has_pgste)
return 0;
physpage = pte_val(*ptep) & PAGE_MASK;
pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- if (mm->context.pgstes) {
+ if (mm->context.has_pgste) {
rcp_lock(ptep);
__ptep_ipte(address, ptep);
ptep_rcp_copy(ptep);
unsigned long *table;
unsigned long bits;
- bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
spin_lock(&mm->page_table_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page);
- if (mm->context.pgstes)
+ if (mm->context.has_pgste)
clear_table_pgstes(table);
else
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
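clear_table_pgstes() itself is not shown in this excerpt. Given the layout implied by "ptep + PTRS_PER_PTE" in the hunks above (the 256 ptes in the lower half of the 4KB page, the 256 pgstes in the upper half), a plausible 64-bit sketch would be:

/*
 * Sketch only, assuming the 64-bit layout described above; the real
 * clear_table_pgstes() is outside this excerpt.
 */
static void clear_table_pgstes_sketch(unsigned long *table)
{
	/* lower 2KB: 256 invalid ptes */
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE / 2);
	/* upper 2KB: 256 zeroed pgstes */
	memset(table + PTRS_PER_PTE, 0, PAGE_SIZE / 2);
}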
struct page *page;
unsigned long bits;
- bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
+ bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
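In both hunks, bits marks the 2KB fragments of the 4KB page that the table occupies: a plain 64-bit page table fills one fragment (one bit), while a noexec shadow or pgste table fills both halves (two bits). The shift turns the table's byte offset into a fragment index; worked through for 64-bit, where sizeof(unsigned long) == 8:

/*
 * Worked example (64-bit):
 *   table at page offset 0:     bits << (0    / 256 / 8) == bits << 0
 *   table at page offset 2048:  bits << (2048 / 256 / 8) == bits << 1
 *
 * A plain table (bits == 1) therefore marks a single 2KB fragment
 * in page->flags, while a noexec or pgste table (bits == 3) starts
 * at offset 0 and marks both fragments, as it owns the whole page.
 */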
struct mm_struct *mm, *old_mm;
/* Do we have pgstes? if yes, we are done */
- if (tsk->mm->context.pgstes)
+ if (tsk->mm->context.has_pgste)
return 0;
/* lets check if we are allowed to replace the mm */
}
task_unlock(tsk);
- /* we copy the mm with pgstes enabled */
- tsk->mm->context.pgstes = 1;
+ /* we copy the mm and let dup_mm create the page tables with pgstes */
+ tsk->mm->context.alloc_pgste = 1;
mm = dup_mm(tsk);
- tsk->mm->context.pgstes = 0;
+ tsk->mm->context.alloc_pgste = 0;
if (!mm)
return -ENOMEM;
- /* Now lets check again if somebody attached ptrace etc */
+ /* Now let's check again if anything happened in the meantime */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
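		/*
		 * Assumed continuation, outside this excerpt: the recheck
		 * failed, so drop the fresh pgste-enabled copy and bail
		 * out; the task keeps its old mm.
		 */
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}
	/*
	 * Assumed success path, also outside this excerpt: the copy with
	 * has_pgste set replaces the task's mm and the old mm is put.
	 */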