#define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */
#define SPRN_L1CSR0 0x3F2 /* L1 Cache Control and Status Register 0 */
#define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */
#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */
#define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */
#define L2CSR0_L2LOA 0x00000080 /* L2 Cache Lock Overflow Allocate */
#define L2CSR0_L2LO 0x00000020 /* L2 Cache Lock Overflow */
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
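The flash-invalidate protocol behind these bits: software writes 1s to the per-array FI bits and the hardware clears them again once the invalidation completes, which is why the assembly below polls MMUCSR0 after writing it. A minimal C sketch of that sequence, assuming the kernel's mtspr()/mfspr() macros; the combined mask name is local to this sketch (the assembly defines its own MMUCSR0_TLBFI):

#define MMUCSR0_TLBFI_ALL (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
                           MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)

static inline void mmucsr0_flash_invalidate(void)
{
        /* Request a flash invalidate of every TLB array... */
        mtspr(SPRN_MMUCSR0, MMUCSR0_TLBFI_ALL);

        /* ...then spin until the hardware clears the FI bits. */
        while (mfspr(SPRN_MMUCSR0) & MMUCSR0_TLBFI_ALL)
                ;
}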
+
/* Bit definitions for SGR. */
#define SGR_NORMAL 0 /* Speculative fetching allowed. */
#define SGR_GUARDED 1 /* Speculative fetching disallowed. */
#include <linux/mm.h>
extern void _tlbie(unsigned long address, unsigned int pid);
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+extern void _tlbil_va(unsigned long address, unsigned int pid);
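Per the comments in the assembly further down, the new _tlbil_* entry points are local (non-broadcast) forms of the invalidates: _tlbil_all drops the whole local TLB, _tlbil_pid everything for one software PID, and _tlbil_va a single translation. A hedged usage sketch; drop_context_translations() is a hypothetical caller, not a kernel function:

/* Hypothetical caller: discard everything the local core has cached for
 * one context. On parts without a per-PID invalidate, the implementation
 * falls back to an invalidate-all (see _tlbil_pid below). */
static inline void drop_context_translations(struct mm_struct *mm)
{
        _tlbil_pid(mm->context.id);
}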
#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- _tlbia();
+ _tlbil_pid(mm->context.id);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
- _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+ _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
}
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
unsigned long vmaddr)
{
- _tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+ flush_tlb_page(vma, vmaddr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- _tlbia();
+ _tlbil_pid(vma->vm_mm->context.id);
}
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
- _tlbia();
+ _tlbil_pid(0);
}
#elif defined(CONFIG_PPC32)
/*
* Flush MMU TLB
*/
+#ifndef CONFIG_FSL_BOOKE
+/*
+ * On CPUs without local-only forms, _tlbil_all and _tlbil_pid
+ * simply fall through to _tlbia below.
+ */
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+#endif
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
sync /* Flush to memory before changing mapping */
/*
* Flush MMU TLB for a particular address
*/
+#ifndef CONFIG_FSL_BOOKE
+/* Likewise, _tlbil_va falls through to _tlbie below. */
+_GLOBAL(_tlbil_va)
+#endif
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
/* We run the search with interrupts disabled because we have to change
#endif /* ! CONFIG_40x */
blr
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * Flush MMU TLB, but only on the local processor (no broadcast)
+ */
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3 /* flash invalidate all TLB arrays */
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l /* hardware clears the FI bits when done */
+ bne 1b
+ blr
+
+/*
+ * Flush MMU TLB for a particular process id, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_pid)
+/* We currently do an invalidate-all since the core has no per-PID invalidate */
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+ slwi r4,r4,16 /* shift PID into MAS6[SPID0] */
+ mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
+ tlbsx 0,r3 /* search; results land in MAS0-MAS3 */
+ mfspr r4,SPRN_MAS1 /* check valid */
+ andis. r3,r4,MAS1_VALID@h
+ beqlr /* no matching entry, nothing to do */
+ rlwinm r4,r4,0,1,31 /* clear MAS1[V] */
+ mtspr SPRN_MAS1,r4
+ tlbwe /* write the now-invalid entry back */
+ blr
+#endif /* CONFIG_FSL_BOOKE */
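For reference, a rough C rendering of the _tlbil_va sequence above, assuming the kernel's mtspr()/mfspr() macros and the MAS1_VALID mask; tlbsx and tlbwe have no C wrappers, so they appear as inline asm. This is an illustrative sketch, not a replacement for the assembly:

static inline void tlbil_va_sketch(unsigned long address, unsigned int pid)
{
        unsigned long mas1;

        mtspr(SPRN_MAS6, pid << 16);            /* MAS6[SPID0] = pid, AS = 0 */
        asm volatile("tlbsx 0,%0" : : "r" (address));
        mas1 = mfspr(SPRN_MAS1);
        if (!(mas1 & MAS1_VALID))               /* no matching entry */
                return;
        mtspr(SPRN_MAS1, mas1 & ~MAS1_VALID);   /* clear the valid bit */
        asm volatile("tlbwe");                  /* write the entry back */
}

As with the 40x _tlbie above, which runs its search with interrupts disabled, the tlbsx/tlbwe pair assumes nothing clobbers the MAS registers in between.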
+
/*
* Flush instruction cache.
* This is a no-op on the 601.
EXPORT_SYMBOL(flush_tlb_kernel_range);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(_tlbie);
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+EXPORT_SYMBOL(_tlbil_va);
+#endif
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);