s390: always save and restore all registers on context switch
author    Heiko Carstens <heiko.carstens@de.ibm.com>
          Mon, 20 Nov 2017 11:38:44 +0000 (12:38 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
          Tue, 5 Dec 2017 06:51:08 +0000 (07:51 +0100)
The switch_to() macro has an optimization to avoid saving and
restoring register contents that aren't needed for kernel threads.
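
For background (a sketch, not part of the patch): a kernel thread has no user
address space, so its task_struct's mm pointer is NULL. The optimization
removed below used exactly this test to decide whether the save/restore could
be skipped.

	#include <linux/sched.h>

	/*
	 * Sketch of the check the old optimization relied on: kernel
	 * threads carry no user address space, hence tsk->mm == NULL.
	 */
	static inline bool is_kernel_thread(const struct task_struct *tsk)
	{
		return tsk->mm == NULL;
	}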

There is however the possibility that a kernel thread execve's a user
space program. In that case the execve'd process can partially see the
register contents of the previous process, which must not be allowed.
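
To illustrate what "partially see" means, here is a hypothetical user space
sketch (not from the patch; an s390 target and GCC inline asm are assumed)
that reads one access register. Under the old switch_to(), a process
execve'd by a kernel thread could observe stale values left behind by the
previous task in such a read.

	#include <stdio.h>

	/*
	 * Hypothetical illustration only: the s390 "ear" instruction
	 * extracts an access register into a general register.
	 */
	static unsigned int read_acr4(void)
	{
		unsigned int val;

		asm volatile("ear %0,%%a4" : "=d" (val));
		return val;
	}

	int main(void)
	{
		printf("access register 4 = %#x\n", read_acr4());
		return 0;
	}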

To avoid this, simply always save and restore register contents on
context switch.

Cc: <stable@vger.kernel.org> # v2.6.37+
Fixes: fdb6d070effba ("switch_to: dont restore/save access & fpu regs for kernel threads")
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/switch_to.h

index ec7b476c1ac571b82e219a786ad2525a0ff9ceb2..c61b2cc1a8a86a9508fe093a607d106ff0118be3 100644 (file)
@@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
        asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
-#define switch_to(prev,next,last) do {                                 \
-       if (prev->mm) {                                                 \
-               save_fpu_regs();                                        \
-               save_access_regs(&prev->thread.acrs[0]);                \
-               save_ri_cb(prev->thread.ri_cb);                         \
-               save_gs_cb(prev->thread.gs_cb);                         \
-       }                                                               \
+#define switch_to(prev, next, last) do {                               \
+       /* save_fpu_regs() sets the CIF_FPU flag, which enforces        \
+        * a restore of the floating point / vector registers as        \
+        * soon as the next task returns to user space                  \
+        */                                                             \
+       save_fpu_regs();                                                \
+       save_access_regs(&prev->thread.acrs[0]);                        \
+       save_ri_cb(prev->thread.ri_cb);                                 \
+       save_gs_cb(prev->thread.gs_cb);                                 \
        update_cr_regs(next);                                           \
-       if (next->mm) {                                                 \
-               set_cpu_flag(CIF_FPU);                                  \
-               restore_access_regs(&next->thread.acrs[0]);             \
-               restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
-               restore_gs_cb(next->thread.gs_cb);                      \
-       }                                                               \
-       prev = __switch_to(prev,next);                                  \
+       restore_access_regs(&next->thread.acrs[0]);                     \
+       restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);          \
+       restore_gs_cb(next->thread.gs_cb);                              \
+       prev = __switch_to(prev, next);                                 \
 } while (0)
 
 #endif /* __ASM_SWITCH_TO_H */