};
extern void slb_initialize(void);
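+/* Flush the SLB on every CPU that is currently running the given mm */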
+extern void core_flush_all_slbs(struct mm_struct *mm);
extern void slb_flush_and_rebolt(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */
-#ifdef CONFIG_PPC_BOOK3S
- mm_context_id_t mm_ctx_id;
-#ifdef CONFIG_PPC_MM_SLICES
- unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
- unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long mm_ctx_slb_addr_limit;
-#else
- u16 mm_ctx_user_psize;
- u16 mm_ctx_sllp;
-#endif
-#endif
-
/*
* then miscellaneous read-write fields
*/
#endif /* CONFIG_PPC_BOOK3S_64 */
} ____cacheline_aligned;
-extern void copy_mm_to_paca(struct mm_struct *mm);
extern struct paca_struct **paca_ptrs;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S
- OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
- OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
- OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
- DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
-#endif /* CONFIG_PPC_MM_SLICES */
-#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACAPGD, paca_struct, pgd);
printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
-
-void copy_mm_to_paca(struct mm_struct *mm)
-{
-#ifdef CONFIG_PPC_BOOK3S
- mm_context_t *context = &mm->context;
-
- get_paca()->mm_ctx_id = context->id;
-#ifdef CONFIG_PPC_MM_SLICES
- VM_BUG_ON(!mm->context.slb_addr_limit);
- get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
- memcpy(&get_paca()->mm_ctx_low_slices_psize,
- &context->low_slices_psize, sizeof(context->low_slices_psize));
- memcpy(&get_paca()->mm_ctx_high_slices_psize,
- &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
-#else /* CONFIG_PPC_MM_SLICES */
- get_paca()->mm_ctx_user_psize = context->user_psize;
- get_paca()->mm_ctx_sllp = context->sllp;
-#endif
-#else /* !CONFIG_PPC_BOOK3S */
- return;
-#endif
-}
}
#ifdef CONFIG_PPC_MM_SLICES
-static unsigned int get_paca_psize(unsigned long addr)
+static unsigned int get_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *psizes;
unsigned long index, mask_index;
if (addr < SLICE_LOW_TOP) {
- psizes = get_paca()->mm_ctx_low_slices_psize;
+ psizes = mm->context.low_slices_psize;
index = GET_LOW_SLICE_INDEX(addr);
} else {
- psizes = get_paca()->mm_ctx_high_slices_psize;
+ psizes = mm->context.high_slices_psize;
index = GET_HIGH_SLICE_INDEX(addr);
}
mask_index = index & 0x1;
}
#else
-unsigned int get_paca_psize(unsigned long addr)
+unsigned int get_psize(struct mm_struct *mm, unsigned long addr)
{
- return get_paca()->mm_ctx_user_psize;
+ return mm->context.user_psize;
}
#endif
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
- if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
+ if (get_psize(mm, addr) == MMU_PAGE_4K)
return;
slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
copro_flush_all_slbs(mm);
- if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
-
- copy_mm_to_paca(mm);
- slb_flush_and_rebolt();
- }
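+ /* stale SLB entries for this mm may be cached on any CPU, not just this one */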
+ core_flush_all_slbs(mm);
}
#endif /* CONFIG_PPC_64K_PAGES */
trap, vsid, ssize, psize, lpsize, pte);
}
-static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
- int psize, bool user_region)
-{
- if (user_region) {
- if (psize != get_paca_psize(ea)) {
- copy_mm_to_paca(mm);
- slb_flush_and_rebolt();
- }
- } else if (get_paca()->vmalloc_sllp !=
- mmu_psize_defs[mmu_vmalloc_psize].sllp) {
- get_paca()->vmalloc_sllp =
- mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_vmalloc_update();
- }
-}
-
/* Result code is:
* 0 - handled
* 1 - normal page fault
rc = 1;
goto bail;
}
- psize = get_slice_psize(mm, ea);
+ psize = get_psize(mm, ea);
ssize = user_segment_size(ea);
vsid = get_user_vsid(&mm->context, ea, ssize);
break;
WARN_ON(1);
}
#endif
- if (current->mm == mm)
- check_paca_psize(ea, mm, psize, user_region);
-
goto bail;
}
"to 4kB pages because of "
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
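+ /* update this CPU's bolted vmalloc SLB entry for the new page size */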
+ slb_vmalloc_update();
copro_flush_all_slbs(mm);
+ core_flush_all_slbs(mm);
}
}
#endif /* CONFIG_PPC_64K_PAGES */
- if (current->mm == mm)
- check_paca_psize(ea, mm, psize, user_region);
-
#ifdef CONFIG_PPC_64K_PAGES
if (psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap,
#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
- int psize = get_slice_psize(mm, ea);
+ int psize = get_psize(mm, ea);
/* We only prefault standard pages for now */
if (unlikely(psize != mm->context.user_psize))
* MMU context id, which is then moved to SPRN_PID.
*
* For the hash MMU it is either the first load from slb_cache
- * in switch_slb(), and/or the store of paca->mm_ctx_id in
- * copy_mm_to_paca().
+ * in switch_slb(), and/or the load of the MMU context id.
*
* On the other side, the barrier is in mm/tlb-radix.c for
* radix which orders earlier stores to clear the PTEs vs
get_paca()->slb_cache_ptr = 0;
}
- copy_mm_to_paca(mm);
-
/*
* preload some userspace segments into the SLB.
* Almost all 32 and 64bit PowerPC executables are linked at
mmu_slb_size = size;
}
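+/*
+ * Called on each CPU from core_flush_all_slbs(); flushes and rebolts
+ * the SLB only if this CPU is currently running the target mm.
+ */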
+static void cpu_flush_slb(void *parm)
+{
+ struct mm_struct *mm = parm;
+ unsigned long flags;
+
+ if (mm != current->active_mm)
+ return;
+
+ local_irq_save(flags);
+ slb_flush_and_rebolt();
+ local_irq_restore(flags);
+}
+
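+/* Flush the SLB on every CPU on which this mm is active */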
+void core_flush_all_slbs(struct mm_struct *mm)
+{
+ on_each_cpu(cpu_flush_slb, mm, 1);
+}
+
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
return true;
}
-static void slice_flush_segments(void *parm)
-{
-#ifdef CONFIG_PPC64
- struct mm_struct *mm = parm;
- unsigned long flags;
-
- if (mm != current->active_mm)
- return;
-
- copy_mm_to_paca(current->active_mm);
-
- local_irq_save(flags);
- slb_flush_and_rebolt();
- local_irq_restore(flags);
-#endif
-}
-
static void slice_convert(struct mm_struct *mm,
const struct slice_mask *mask, int psize)
{
spin_unlock_irqrestore(&slice_convert_lock, flags);
copro_flush_all_slbs(mm);
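+ /* core_flush_all_slbs() is 64-bit only; slice code also builds on 32-bit */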
+#ifdef CONFIG_PPC64
+ core_flush_all_slbs(mm);
+#endif
}
/*
* be already initialised beyond the old address limit.
*/
mm->context.slb_addr_limit = high_limit;
-
- on_each_cpu(slice_flush_segments, mm, 1);
+#ifdef CONFIG_PPC64
+ core_flush_all_slbs(mm);
+#endif
}
/* Sanity checks */
(SLICE_NUM_HIGH &&
!bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
slice_convert(mm, &potential_mask, psize);
+#ifdef CONFIG_PPC64
if (psize > MMU_PAGE_BASE)
- on_each_cpu(slice_flush_segments, mm, 1);
+ core_flush_all_slbs(mm);
+#endif
}
return newaddr;