x86/asm: Pin sensitive CR0 bits
author	Kees Cook <keescook@chromium.org>
Tue, 18 Jun 2019 04:55:03 +0000 (21:55 -0700)
committer	Thomas Gleixner <tglx@linutronix.de>
Sat, 22 Jun 2019 09:55:22 +0000 (11:55 +0200)
With the sensitive CR4 bits now pinned, the WP bit of CR0 might
become a target as well.

Following the same reasoning as for CR4 pinning, pin CR0's WP
bit. In contrast to the CPU-feature-dependent CR4 pinning, this can be
done with a constant value.
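
For context, the CR4 counterpart added by the earlier patch in this
series takes roughly the following shape (a sketch reconstructed from
that patch, details approximate; cr4_pinned_bits is a mask filled in at
boot from the CPU features actually enabled, which is the
feature-dependent part referred to above):

	/* Sketch of the CR4 counterpart; approximate, for comparison only. */
	static inline void native_write_cr4(unsigned long val)
	{
		unsigned long bits_missing = 0;

	set_register:
		asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));

		if (static_branch_likely(&cr_pinning)) {
			if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
				/* Collect the pinned bits that were cleared. */
				bits_missing = ~val & cr4_pinned_bits;
				val |= bits_missing;
				goto set_register;
			}
			/* Warn after we've set the missing bits. */
			WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
				  bits_missing);
		}
	}

CR0 needs no such runtime mask: WP is architectural and always expected
to be set, so the pinned value can simply be the X86_CR0_WP constant.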

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: kernel-hardening@lists.openwall.com
Link: https://lkml.kernel.org/r/20190618045503.39105-4-keescook@chromium.org
arch/x86/include/asm/special_insns.h

index c8c8143ab27b174155972641b95400c85cfd121c..b2e84d113f2a82e9fd9d007b0cf7b437c2930ad8 100644
@@ -31,7 +31,20 @@ static inline unsigned long native_read_cr0(void)
 
 static inline void native_write_cr0(unsigned long val)
 {
-       asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
+       unsigned long bits_missing = 0;
+
+set_register:
+       asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
+
+       if (static_branch_likely(&cr_pinning)) {
+               if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
+                       bits_missing = X86_CR0_WP;
+                       val |= bits_missing;
+                       goto set_register;
+               }
+               /* Warn after we've set the missing bits. */
+               WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
+       }
 }
 
 static inline unsigned long native_read_cr2(void)
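
For illustration, a hypothetical caller (not part of the patch) shows
the resulting behavior once the cr_pinning static key is enabled:

	/* Hypothetical snippet: attempt to turn off write protection. */
	unsigned long cr0 = native_read_cr0();

	native_write_cr0(cr0 & ~X86_CR0_WP);
	/*
	 * native_write_cr0() notices X86_CR0_WP is missing, jumps back to
	 * set_register to rewrite the value with WP forced on, and then
	 * emits "CR0 WP bit went missing!?" via WARN_ONCE.
	 */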