--- /dev/null
+From e6e6ef4275978823ec3a84133fc91f4ffbef5c84 Mon Sep 17 00:00:00 2001
+From: Paul Burton <paul.burton@imgtec.com>
+Date: Mon, 22 Feb 2016 18:09:44 +0000
+Subject: [PATCH] MIPS: Add barriers between dcache & icache flushes
+
+Index-based cache operations may be arbitrarily reordered by
+out-of-order CPUs. Thus code which writes back the dcache & then
+invalidates the icache using indexed cache ops must include a barrier
+between operating on the two caches in order to prevent the scenario
+in which:
+
+ - icache invalidation occurs.
+
+ - icache fetch occurs, due to speculation.
+
+ - dcache writeback occurs.
+
+If the above were allowed to happen then the icache would contain stale
+data. Forcing the dcache writeback to complete before the icache
+invalidation avoids this.
+
+Signed-off-by: Paul Burton <paul.burton@imgtec.com>
+Cc: James Hogan <james.hogan@imgtec.com>
+---
+ arch/mips/mm/c-r4k.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -515,6 +515,7 @@ static inline void local_r4k___flush_cac
+
+ default:
+ r4k_blast_dcache();
++ mb(); /* cache instructions may be reordered */
+ r4k_blast_icache();
+ break;
+ }
+@@ -595,8 +596,10 @@ static inline void local_r4k_flush_cache
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+ r4k_blast_dcache();
+ /* If executable, blast stale lines from icache */
+- if (exec)
++ if (exec) {
++ mb(); /* cache instructions may be reordered */
+ r4k_blast_icache();
++ }
+ }
+
+ static void r4k_flush_cache_range(struct vm_area_struct *vma,
+@@ -697,8 +700,13 @@ static inline void local_r4k_flush_cache
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ vaddr ? r4k_blast_dcache_page(addr) :
+ r4k_blast_dcache_user_page(addr);
+- if (exec && !cpu_icache_snoops_remote_store)
++ if (exec)
++ mb(); /* cache instructions may be reordered */
++
++ if (exec && !cpu_icache_snoops_remote_store) {
+ r4k_blast_scache_page(addr);
++ mb(); /* cache instructions may be reordered */
++ }
+ }
+ if (exec) {
+ if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
+@@ -765,6 +773,7 @@ static inline void __local_r4k_flush_ica
+ else
+ blast_dcache_range(start, end);
+ }
++ mb(); /* cache instructions may be reordered */
+ }
+
+ if (type == R4K_INDEX ||
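
For context, below is a minimal, self-contained C sketch of the ordering
problem the patch addresses. The helper names (dcache_index_writeback_inv,
icache_index_invalidate, make_code_visible) are invented for illustration
and are not the kernel's interfaces; the real code uses the r4k_blast_*()
routines and the mb() calls added by the hunks above. The only point is the
ordering: the dcache writeback must complete (SYNC) before the icache
invalidate is issued.

/*
 * Hypothetical illustration -- NOT kernel code.  Assumes a MIPS CPU in
 * kernel mode (the "cache" instruction is privileged) and a GCC-style
 * toolchain.
 */

/* mb() on MIPS boils down to a SYNC plus a compiler barrier. */
#define sync_barrier()	__asm__ __volatile__("sync" : : : "memory")

/* Index_Writeback_Inv_D (0x01): write back & invalidate one dcache line. */
static inline void dcache_index_writeback_inv(unsigned long addr)
{
	__asm__ __volatile__(
		".set push\n\t.set mips3\n\t"
		"cache 0x01, 0(%0)\n\t"
		".set pop" : : "r" (addr) : "memory");
}

/* Index_Invalidate_I (0x00): invalidate one icache line. */
static inline void icache_index_invalidate(unsigned long addr)
{
	__asm__ __volatile__(
		".set push\n\t.set mips3\n\t"
		"cache 0x00, 0(%0)\n\t"
		".set pop" : : "r" (addr) : "memory");
}

/*
 * Make freshly written code at 'line' executable.  Without the SYNC an
 * out-of-order CPU may perform the icache invalidate, speculatively
 * refetch the still-stale line, and only then complete the dcache
 * writeback -- exactly the scenario listed in the commit message above.
 */
static void make_code_visible(unsigned long line)
{
	dcache_index_writeback_inv(line);	/* push data out of the dcache */
	sync_barrier();				/* order the two cache ops     */
	icache_index_invalidate(line);		/* now drop the stale line     */
}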