From 1ab534e85c93945f7862378d8c8adcf408205b19 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 24 Aug 2018 10:03:51 -0700 Subject: [PATCH 01/15] x86/spectre: Add missing family 6 check to microcode check The check for Spectre microcodes does not check for family 6, only the model numbers. Add a family 6 check to avoid ambiguity with other families. Fixes: a5b296636453 ("x86/cpufeature: Blacklist SPEC_CTRL/PRED_CMD on early Spectre v2 microcodes") Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180824170351.34874-2-andi@firstfloor.org --- arch/x86/kernel/cpu/intel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 401e8c133108..fc3c07fe7df5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_HYPERVISOR)) return false; + if (c->x86 != 6) + return false; + for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { if (c->x86_model == spectre_bad_microcodes[i].model && c->x86_stepping == spectre_bad_microcodes[i].stepping) From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 24 Aug 2018 10:03:50 -0700 Subject: [PATCH 02/15] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+ On Nehalem and newer core CPUs the CPU cache internally uses 44 bits physical address space. The L1TF workaround is limited by this internal cache address width, and needs to have one bit free there for the mitigation to work. Older client systems report only 36bit physical address space so the range check decides that L1TF is not mitigated for a 36bit phys/32GB system with some memory holes. But since these actually have the larger internal cache width this warning is bogus because it would only really be needed if the system had more than 43bits of memory. Add a new internal x86_cache_bits field. Normally it is the same as the physical bits field reported by CPUID, but for Nehalem and newer force it to be at least 44bits. Change the L1TF memory size warning to use the new cache_bits field to avoid bogus warnings and remove the bogus comment about memory size.
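For context on the numbers above: l1tf_pfn_limit() allows RAM only below half of the usable address width, i.e. 2^(bits - 1) bytes. The sketch below is an illustrative user-space program, not part of the patch; it only assumes x86's 4 KiB page size (PAGE_SHIFT = 12) and shows why a reported 36-bit width caps the safe range at 32 GiB while the 44-bit internal cache width raises it to 8 TiB.

/* Illustrative sketch of the l1tf_pfn_limit() arithmetic (user-space only). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                           /* 4 KiB pages on x86 */

static uint64_t l1tf_limit_bytes(unsigned int bits)
{
        /* Highest safe PFN is 2^(bits - 1 - PAGE_SHIFT); convert to bytes. */
        return (1ULL << (bits - 1 - PAGE_SHIFT)) << PAGE_SHIFT;
}

int main(void)
{
        /* CPUID reports 36 physical bits: the limit is 32 GiB, so a 32 GiB
         * box with memory holes trips the warning. */
        printf("36-bit phys : %llu GiB\n",
               (unsigned long long)(l1tf_limit_bytes(36) >> 30));

        /* Using the 44-bit internal cache width instead: 8192 GiB. */
        printf("44-bit cache: %llu GiB\n",
               (unsigned long long)(l1tf_limit_bytes(44) >> 30));
        return 0;
}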
Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf") Reported-by: George Anchev Reported-by: Christopher Snowhill Signed-off-by: Andi Kleen Signed-off-by: Thomas Gleixner Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: Michael Hocko Cc: vbabka@suse.cz Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org --- arch/x86/include/asm/processor.h | 4 ++- arch/x86/kernel/cpu/bugs.c | 46 ++++++++++++++++++++++++++++---- arch/x86/kernel/cpu/common.c | 1 + 3 files changed, 45 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index c24297268ebc..d53c54b842da 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -132,6 +132,8 @@ struct cpuinfo_x86 { /* Index into per_cpu list: */ u16 cpu_index; u32 microcode; + /* Address space bits used by the cache internally */ + u8 x86_cache_bits; unsigned initialized : 1; } __randomize_layout; @@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c); static inline unsigned long long l1tf_pfn_limit(void) { - return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); + return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); } extern void early_cpu_init(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4c2313d0b9ca..40bdaea97fe7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation); enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); +/* + * These CPUs all support 44bits physical address space internally in the + * cache but CPUID can report a smaller number of physical address bits. + * + * The L1TF mitigation uses the top most address bit for the inversion of + * non present PTEs. When the installed memory reaches into the top most + * address bit due to memory holes, which has been observed on machines + * which report 36bits physical address bits and have 32G RAM installed, + * then the mitigation range check in l1tf_select_mitigation() triggers. + * This is a false positive because the mitigation is still possible due to + * the fact that the cache uses 44bit internally. Use the cache bits + * instead of the reported physical bits and adjust them on the affected + * machines to 44bit if the reported bits are less than 44. 
+ */ +static void override_cache_bits(struct cpuinfo_x86 *c) +{ + if (c->x86 != 6) + return; + + switch (c->x86_model) { + case INTEL_FAM6_NEHALEM: + case INTEL_FAM6_WESTMERE: + case INTEL_FAM6_SANDYBRIDGE: + case INTEL_FAM6_IVYBRIDGE: + case INTEL_FAM6_HASWELL_CORE: + case INTEL_FAM6_HASWELL_ULT: + case INTEL_FAM6_HASWELL_GT3E: + case INTEL_FAM6_BROADWELL_CORE: + case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_SKYLAKE_MOBILE: + case INTEL_FAM6_SKYLAKE_DESKTOP: + case INTEL_FAM6_KABYLAKE_MOBILE: + case INTEL_FAM6_KABYLAKE_DESKTOP: + if (c->x86_cache_bits < 44) + c->x86_cache_bits = 44; + break; + } +} + static void __init l1tf_select_mitigation(void) { u64 half_pa; @@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; + override_cache_bits(&boot_cpu_data); + switch (l1tf_mitigation) { case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_FLUSH_NOWARN: @@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void) return; #endif - /* - * This is extremely unlikely to happen because almost all - * systems have far more MAX_PA/2 than RAM can be fit into - * DIMM slots. - */ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 84dee5ab745a..44c4ef3d989b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c) else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) c->x86_phys_bits = 36; #endif + c->x86_cache_bits = c->x86_phys_bits; } static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) From e3a5dc08715abba646324fd8456282bd77798e9c Mon Sep 17 00:00:00 2001 From: Nikolas Nyby Date: Sat, 25 Aug 2018 19:10:54 -0400 Subject: [PATCH 03/15] x86/Kconfig: Fix trivial typo Fix a typo in the Kconfig help text: adverticed -> advertised. Signed-off-by: Nikolas Nyby Signed-off-by: Thomas Gleixner Cc: trivial@kernel.org Cc: tglx@linutronix.de Cc: x86@kernel.org Link: https://lkml.kernel.org/r/20180825231054.23813-1-nikolas@gnu.org --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c5ff296bc5d1..1a0be022f91d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2843,7 +2843,7 @@ config X86_SYSFB This option, if enabled, marks VGA/VBE/EFI framebuffers as generic framebuffers so the new generic system-framebuffer drivers can be used on x86. If the framebuffer is not compatible with the generic - modes, it is adverticed as fallback platform framebuffer so legacy + modes, it is advertised as fallback platform framebuffer so legacy drivers like efifb, vesafb and uvesafb can pick it up. If this option is not selected, all system framebuffers are always marked as fallback platform framebuffers as usual. From 36bf9da2913054c218337d8cd7cb11bddc1fafb0 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Aug 2018 14:45:14 +0900 Subject: [PATCH 04/15] x86/build: Remove jump label quirk for GCC older than 4.5.2 Commit cafa0010cd51 ("Raise the minimum required gcc version to 4.6") bumped the minimum GCC version to 4.6 for all architectures. Remove the workaround code. It was the only user of cc-if-fullversion. Remove the macro as well. Signed-off-by: Masahiro Yamada Signed-off-by: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Michal Marek Cc: linux-kbuild@vger.kernel.org Link: https://lkml.kernel.org/r/1535348714-25457-1-git-send-email-yamada.masahiro@socionext.com --- arch/x86/Makefile | 12 ------------ scripts/Kbuild.include | 4 ---- 2 files changed, 16 deletions(-) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94859241bc3e..8fc8f94ef5f5 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -179,18 +179,6 @@ ifndef CC_HAVE_ASM_GOTO $(error Compiler lacks asm-goto support.) endif -# -# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a -# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way -# to test for this bug at compile-time because the test case needs to execute, -# which is a no-go for cross compilers. So check the GCC version instead. -# -ifdef CONFIG_JUMP_LABEL - ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1) - ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1) - endif -endif - ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) # This compiler flag is not supported by Clang: KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index c75413d05a63..ce53639a864a 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) -# cc-if-fullversion -# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1) -cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4)) - # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) cc-ldoption = $(call try-run,\ From 1f59a4581b5ecfe9b4f049a7a2cf904d8352842d Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 27 Aug 2018 14:40:09 -0700 Subject: [PATCH 05/15] x86/irqflags: Mark native_restore_fl extern inline This should have been marked extern inline in order to pick up the out of line definition in arch/x86/kernel/irqflags.S. Fixes: 208cbb325589 ("x86/irqflags: Provide a declaration for native_save_fl") Reported-by: Ben Hutchings Signed-off-by: Nick Desaulniers Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: "H. Peter Anvin" Cc: Boris Ostrovsky Cc: Greg Kroah-Hartman Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180827214011.55428-1-ndesaulniers@google.com --- arch/x86/include/asm/irqflags.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index c14f2a74b2be..15450a675031 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void) return flags; } -static inline void native_restore_fl(unsigned long flags) +extern inline void native_restore_fl(unsigned long flags); +extern inline void native_restore_fl(unsigned long flags) { asm volatile("push %0 ; popf" : /* no output */ From f12d11c5c184626b4befdee3d573ec8237405a33 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 28 Aug 2018 20:40:33 +0200 Subject: [PATCH 06/15] x86/entry/64: Wipe KASAN stack shadow before rewind_stack_do_exit() Reset the KASAN shadow state of the task stack before rewinding RSP. 
Without this, a kernel oops will leave parts of the stack poisoned, and code running under do_exit() can trip over such poisoned regions and cause nonsensical false-positive KASAN reports about stack-out-of-bounds bugs. This does not wipe the exception stacks; if an oops happens on an exception stack, it might result in random KASAN false-positives from other tasks afterwards. This is probably relatively uninteresting, since if the kernel oopses on an exception stack, there are most likely bigger things to worry about. It'd be more interesting if vmapped stacks and KASAN were compatible, since then handle_stack_overflow() would oops from exception stack context. Fixes: 2deb4be28077 ("x86/dumpstack: When OOPSing, rewind the stack before do_exit()") Signed-off-by: Jann Horn Signed-off-by: Thomas Gleixner Acked-by: Andrey Ryabinin Cc: Andy Lutomirski Cc: Dmitry Vyukov Cc: Alexander Potapenko Cc: Kees Cook Cc: kasan-dev@googlegroups.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180828184033.93712-1-jannh@google.com --- arch/x86/kernel/dumpstack.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9c8652974f8e..1596e6bfea6f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -346,7 +347,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr) * We're not going to return, but we might be on an IST stack or * have very little stack space left. Rewind the stack and kill * the task. + * Before we rewind the stack, we have to tell KASAN that we're going to + * reuse the task stack and that existing poisons are invalid. */ + kasan_unpoison_task_stack(current); rewind_stack_do_exit(signr); } NOKPROBE_SYMBOL(oops_end); From 9222f606506c5f8ca2c8b8c939d59ed3e6ac4148 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 28 Aug 2018 08:55:14 +0200 Subject: [PATCH 07/15] x86/alternatives: Lockdep-enforce text_mutex in text_poke*() text_poke() and text_poke_bp() must be called with text_mutex held. Put proper lockdep annotation in place instead of just mentioning the requirement in a comment. Reported-by: Peter Zijlstra Signed-off-by: Jiri Kosina Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) Acked-by: Masami Hiramatsu Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1808280853520.25787@cbobk.fhfr.pm --- arch/x86/kernel/alternative.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 014f214da581..b9d5e7c9ef43 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, * It means the size must be writable atomically and the address must be aligned * in a way that permits an atomic write. It also makes sure we fit on a single * page. - * - * Note: Must be called under text_mutex.
*/ void *text_poke(void *addr, const void *opcode, size_t len) { @@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len) */ BUG_ON(!after_bootmem); + lockdep_assert_held(&text_mutex); + if (!core_kernel_text((unsigned long)addr)) { pages[0] = vmalloc_to_page(addr); pages[1] = vmalloc_to_page(addr + PAGE_SIZE); @@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs) * - replace the first byte (int3) by the first byte of * replacing opcode * - sync cores - * - * Note: must be called under text_mutex. */ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) { @@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) bp_int3_handler = handler; bp_int3_addr = (u8 *)addr + sizeof(int3); bp_patching_in_progress = true; + + lockdep_assert_held(&text_mutex); + /* * Corresponding read barrier in int3 notifier for making sure the * in_progress and handler are correctly ordered wrt. patching. From 26e609eccd37967d3681662433086894830c5d62 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Tue, 14 Aug 2018 18:59:51 +0200 Subject: [PATCH 08/15] x86/asm: Use CC_SET()/CC_OUT() in __gen_sigismember() Replace open-coded set instructions with CC_SET()/CC_OUT(). Signed-off-by: Uros Bizjak Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20180814165951.13538-1-ubizjak@gmail.com --- arch/x86/include/asm/signal.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 5f9012ff52ed..33d3c88a7225 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs); #define __ARCH_HAS_SA_RESTORER +#include #include #ifdef __i386__ @@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig) static inline int __gen_sigismember(sigset_t *set, int _sig) { - unsigned char ret; - asm("btl %2,%1\n\tsetc %0" - : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); + bool ret; + asm("btl %2,%1" CC_SET(c) + : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); return ret; } From 342db04ae71273322f0011384a9ed414df8bdae4 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 28 Aug 2018 17:49:01 +0200 Subject: [PATCH 09/15] x86/dumpstack: Don't dump kernel memory based on usermode RIP show_opcodes() is used both for dumping kernel instructions and for dumping user instructions. If userspace causes #PF by jumping to a kernel address, show_opcodes() can be reached with regs->ip controlled by the user, pointing to kernel code. Make sure that userspace can't trick us into dumping kernel memory into dmesg. Fixes: 7cccf0725cf7 ("x86/dumpstack: Add a show_ip() function") Signed-off-by: Jann Horn Signed-off-by: Thomas Gleixner Reviewed-by: Kees Cook Reviewed-by: Borislav Petkov Cc: "H. 
Peter Anvin" Cc: Andy Lutomirski Cc: security@kernel.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180828154901.112726-1-jannh@google.com --- arch/x86/include/asm/stacktrace.h | 2 +- arch/x86/kernel/dumpstack.c | 16 +++++++++++++--- arch/x86/mm/fault.c | 2 +- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index b6dc698f992a..f335aad404a4 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void) return (unsigned long)frame; } -void show_opcodes(u8 *rip, const char *loglvl); +void show_opcodes(struct pt_regs *regs, const char *loglvl); void show_ip(struct pt_regs *regs, const char *loglvl); #endif /* _ASM_X86_STACKTRACE_H */ diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 1596e6bfea6f..f56895106ccf 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -90,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable, * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random * guesstimate in attempt to achieve all of the above. */ -void show_opcodes(u8 *rip, const char *loglvl) +void show_opcodes(struct pt_regs *regs, const char *loglvl) { #define PROLOGUE_SIZE 42 #define EPILOGUE_SIZE 21 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) u8 opcodes[OPCODE_BUFSIZE]; + unsigned long prologue = regs->ip - PROLOGUE_SIZE; + bool bad_ip; - if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) { + /* + * Make sure userspace isn't trying to trick us into dumping kernel + * memory by pointing the userspace instruction pointer at it. + */ + bad_ip = user_mode(regs) && + __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); + + if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue, + OPCODE_BUFSIZE)) { printk("%sCode: Bad RIP value.\n", loglvl); } else { printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" @@ -113,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl) #else printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); #endif - show_opcodes((u8 *)regs->ip, loglvl); + show_opcodes(regs, loglvl); } void show_iret_regs(struct pt_regs *regs) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index b9123c497e0a..47bebfe6efa7 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, printk(KERN_CONT "\n"); - show_opcodes((u8 *)regs->ip, loglvl); + show_opcodes(regs, loglvl); } static void From 829fe4aa9ac16417a904ad1de1307de906854bcf Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 29 Aug 2018 20:43:17 +0100 Subject: [PATCH 10/15] x86: Allow generating user-space headers without a compiler When bootstrapping an architecture, it's usual to generate the kernel's user-space headers (make headers_install) before building a compiler. Move the compiler check (for asm goto support) to the archprepare target so that it is only done when building code for the target. 
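For reference, the construct whose compiler support is being checked here is GCC's asm goto: inline assembly that may branch to a C label, which the kernel's jump labels depend on. The sketch below is illustrative user-space code, not from the patch; it is x86-only and builds only with a compiler that supports asm goto, i.e. exactly what CC_HAVE_ASM_GOTO records.

/* Minimal x86-only sketch of an "asm goto" statement (illustrative only). */
#include <stdio.h>

static int bit_is_set(unsigned long word, unsigned long bit)
{
        asm goto("bt %1, %0\n\t"
                 "jc %l[yes]"                  /* branch to the C label below */
                 : /* asm goto permits no output operands */
                 : "r" (word), "r" (bit)
                 : "cc"
                 : yes);
        return 0;
yes:
        return 1;
}

int main(void)
{
        printf("%d %d\n", bit_is_set(0x5, 0), bit_is_set(0x5, 1));  /* 1 0 */
        return 0;
}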
Fixes: e501ce957a78 ("x86: Force asm-goto") Reported-by: Helmut Grohne Signed-off-by: Ben Hutchings Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180829194317.GA4765@decadent.org.uk --- arch/x86/Makefile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 8fc8f94ef5f5..8f6e7eb8ae9f 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -175,10 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER endif endif -ifndef CC_HAVE_ASM_GOTO - $(error Compiler lacks asm-goto support.) -endif - ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) # This compiler flag is not supported by Clang: KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) @@ -300,6 +296,13 @@ PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ +archprepare: checkbin +checkbin: +ifndef CC_HAVE_ASM_GOTO + @echo Compiler lacks asm-goto support. + @exit 1 +endif + archclean: $(Q)rm -rf $(objtree)/arch/i386 $(Q)rm -rf $(objtree)/arch/x86_64 From 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 29 Aug 2018 08:47:18 -0700 Subject: [PATCH 11/15] x86/nmi: Fix NMI uaccess race against CR3 switching A NMI can hit in the middle of context switching or in the middle of switch_mm_irqs_off(). In either case, CR3 might not match current->mm, which could cause copy_from_user_nmi() and friends to read the wrong memory. Fix it by adding a new nmi_uaccess_okay() helper and checking it in copy_from_user_nmi() and in __copy_from_user_nmi()'s callers. Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Reviewed-by: Rik van Riel Cc: Nadav Amit Cc: Borislav Petkov Cc: Jann Horn Cc: Peter Zijlstra Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/dd956eba16646fd0b15c3c0741269dfd84452dac.1535557289.git.luto@kernel.org --- arch/x86/events/core.c | 2 +- arch/x86/include/asm/tlbflush.h | 40 +++++++++++++++++++++++++++++++++ arch/x86/lib/usercopy.c | 5 +++++ arch/x86/mm/tlb.c | 7 ++++++ 4 files changed, 53 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5f4829f10129..dfb2f7c0d019 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs perf_callchain_store(entry, regs->ip); - if (!current->mm) + if (!nmi_uaccess_okay()) return; if (perf_callchain_user32(regs, entry)) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 29c9da6c62fc..58ce5288878e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -175,8 +175,16 @@ struct tlb_state { * are on. This means that it may not match current->active_mm, * which will contain the previous user mm when we're in lazy TLB * mode even if we've already switched back to swapper_pg_dir. + * + * During switch_mm_irqs_off(), loaded_mm will be set to + * LOADED_MM_SWITCHING during the brief interrupts-off window + * when CR3 and loaded_mm would otherwise be inconsistent. This + * is for nmi_uaccess_okay()'s benefit. 
*/ struct mm_struct *loaded_mm; + +#define LOADED_MM_SWITCHING ((struct mm_struct *)1) + u16 loaded_mm_asid; u16 next_asid; /* last user mm's ctx id */ @@ -246,6 +254,38 @@ struct tlb_state { }; DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); +/* + * Blindly accessing user memory from NMI context can be dangerous + * if we're in the middle of switching the current user task or + * switching the loaded mm. It can also be dangerous if we + * interrupted some kernel code that was temporarily using a + * different mm. + */ +static inline bool nmi_uaccess_okay(void) +{ + struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); + struct mm_struct *current_mm = current->mm; + + VM_WARN_ON_ONCE(!loaded_mm); + + /* + * The condition we want to check is + * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though, + * if we're running in a VM with shadow paging, and nmi_uaccess_okay() + * is supposed to be reasonably fast. + * + * Instead, we check the almost equivalent but somewhat conservative + * condition below, and we rely on the fact that switch_mm_irqs_off() + * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3. + */ + if (loaded_mm != current_mm) + return false; + + VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa())); + + return true; +} + /* Initialize cr4 shadow for this CPU. */ static inline void cr4_init_shadow(void) { diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c8c6ad0d58b8..3f435d7fca5e 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -7,6 +7,8 @@ #include #include +#include + /* * We rely on the nested NMI work to allow atomic faults from the NMI path; the * nested NMI paths are careful to preserve CR2. @@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) if (__range_not_ok(from, n, TASK_SIZE)) return n; + if (!nmi_uaccess_okay()) + return n; + /* * Even though this function is typically called from NMI/IRQ context * disable pagefaults so that its behaviour is consistent even when diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 9517d1b2a281..e96b99eb800c 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + /* Let nmi_uaccess_okay() know that we're changing CR3. */ + this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); + barrier(); + if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); @@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (next != &init_mm) this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); + /* Make sure we write CR3 before loaded_mm. */ + barrier(); + this_cpu_write(cpu_tlbstate.loaded_mm, next); this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); } From eeb89e2bb1ac45b0836d4170e97a988c3a746c62 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 31 Aug 2018 10:05:38 +0200 Subject: [PATCH 12/15] x86/efi: Load fixmap GDT in efi_call_phys_epilog() When PTI is enabled on x86-32 the kernel uses the GDT mapped in the fixmap for the simple reason that this address is also mapped for user-space. The efi_call_phys_prolog()/efi_call_phys_epilog() wrappers change the GDT to call EFI runtime services and switch back to the kernel GDT when they return. But the switch-back uses the writable GDT, not the fixmap GDT. 
When that happens and the CPU returns to user-space it switches to the user %cr3 and tries to restore user segment registers. This fails because the writable GDT is not mapped in the user page-table, and without a GDT the fault handlers also can't be launched. The result is a triple fault and reboot of the machine. Fix that by restoring the GDT back to the fixmap GDT which is also mapped in the user page-table. Fixes: 7757d607c6b3 ('x86/pti: Allow CONFIG_PAGE_TABLE_ISOLATION for x86_32') Reported-by: Guenter Roeck Signed-off-by: Joerg Roedel Signed-off-by: Thomas Gleixner Tested-by: Guenter Roeck Cc: Ard Biesheuvel Cc: Michal Hocko Cc: Andi Kleen Cc: Linus Torvalds Cc: Dave Hansen Cc: Pavel Machek Cc: hpa@zytor.com Cc: linux-efi@vger.kernel.org Link: https://lkml.kernel.org/r/1535702738-10971-1-git-send-email-joro@8bytes.org --- arch/x86/platform/efi/efi_32.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c index 324b93328b37..05ca14222463 100644 --- a/arch/x86/platform/efi/efi_32.c +++ b/arch/x86/platform/efi/efi_32.c @@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void) void __init efi_call_phys_epilog(pgd_t *save_pgd) { - struct desc_ptr gdt_descr; - - gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0); - gdt_descr.size = GDT_SIZE - 1; - load_gdt(&gdt_descr); - load_cr3(save_pgd); __flush_tlb_all(); + + load_fixmap_gdt(0); } void __init efi_runtime_update_mappings(void) From c7486104a5ce7e8763e3cb5157bba8d0f1468d87 Mon Sep 17 00:00:00 2001 From: LuckTony Date: Fri, 31 Aug 2018 09:55:06 -0700 Subject: [PATCH 13/15] x86/mce: Fix set_mce_nospec() to avoid #GP fault The trick with flipping bit 63 to avoid loading the address of the 1:1 mapping of the poisoned page while the 1:1 map is updated used to work when unmapping the page. But it falls down horribly when attempting to directly set the page as uncacheable. The problem is that when the cache mode is changed to uncachable, the pages need to be flushed from the cache first. But the decoy address is non-canonical due to bit 63 flipped, and the CLFLUSH instruction throws a #GP fault. Add code to change_page_attr_set_clr() to fix the address before calling flush. Fixes: 284ce4011ba6 ("x86/memory_failure: Introduce {set, clear}_mce_nospec()") Suggested-by: Linus Torvalds Signed-off-by: Tony Luck Signed-off-by: Thomas Gleixner Acked-by: Linus Torvalds Cc: Peter Anvin Cc: Borislav Petkov Cc: linux-edac Cc: Dan Williams Cc: Dave Jiang Link: https://lkml.kernel.org/r/20180831165506.GA9605@agluck-desk --- arch/x86/mm/pageattr.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 8d6c34fe49be..51a5a69ecac9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) return 0; } +/* + * Machine check recovery code needs to change cache mode of poisoned + * pages to UC to avoid speculative access logging another error. But + * passing the address of the 1:1 mapping to set_memory_uc() is a fine + * way to encourage a speculative access. So we cheat and flip the top + * bit of the address. This works fine for the code that updates the + * page tables. But at the end of the process we need to flush the cache + * and the non-canonical address causes a #GP fault when used by the + * CLFLUSH instruction.
+ * + * But in the common case we already have a canonical address. This code + * will fix the top bit if needed and is a no-op otherwise. + */ +static inline unsigned long make_addr_canonical_again(unsigned long addr) +{ +#ifdef CONFIG_X86_64 + return (long)(addr << 1) >> 1; +#else + return addr; +#endif +} + + static int change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, @@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, * Save address for cache flush. *addr is modified in the call * to __change_page_attr_set_clr() below. */ - baddr = *addr; + baddr = make_addr_canonical_again(*addr); } /* Must avoid aliasing mappings in the highmem code */ From e78e5a91456fcecaa2efbb3706572fe043766f4d Mon Sep 17 00:00:00 2001 From: Samuel Neves Date: Sat, 1 Sep 2018 21:14:52 +0100 Subject: [PATCH 14/15] x86/vdso: Fix lsl operand order In the __getcpu function, lsl is using the wrong target and destination registers. Luckily, the compiler tends to choose %eax for both variables, so it has been working so far. Fixes: a582c540ac1b ("x86/vdso: Use RDPID in preference to LSL when available") Signed-off-by: Samuel Neves Signed-off-by: Thomas Gleixner Acked-by: Andy Lutomirski Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180901201452.27828-1-sneves@dei.uc.pt --- arch/x86/include/asm/vgtod.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index fb856c9f0449..53748541c487 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void) * * If RDPID is available, use it. */ - alternative_io ("lsl %[p],%[seg]", + alternative_io ("lsl %[seg],%[p]", ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ X86_FEATURE_RDPID, [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); From ff924c5a1ec7548825cc2d07980b03be4224ffac Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 1 Sep 2018 21:01:28 -0700 Subject: [PATCH 15/15] x86/pti: Fix section mismatch warning/error Fix the section mismatch warning in arch/x86/mm/pti.c: WARNING: vmlinux.o(.text+0x6972a): Section mismatch in reference from the function pti_clone_pgtable() to the function .init.text:pti_user_pagetable_walk_pte() The function pti_clone_pgtable() references the function __init pti_user_pagetable_walk_pte(). This is often because pti_clone_pgtable lacks a __init annotation or the annotation of pti_user_pagetable_walk_pte is wrong. FATAL: modpost: Section mismatches detected. Fixes: 85900ea51577 ("x86/pti: Map the vsyscall page if needed") Reported-by: kbuild test robot Signed-off-by: Randy Dunlap Signed-off-by: Thomas Gleixner Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/43a6d6a3-d69d-5eda-da09-0b1c88215a2a@infradead.org --- arch/x86/mm/pti.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 31341ae7309f..c1fc1ae6b429 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) * * Returns a pointer to a PTE on success, or NULL on failure. */ -static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +static pte_t *pti_user_pagetable_walk_pte(unsigned long address) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); pmd_t *pmd;
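The make_addr_canonical_again() helper added in patch 13 above relies on a small bit trick: (long)(addr << 1) >> 1 copies bit 62 into bit 63, so the deliberately flipped top bit of a direct-map decoy address is restored while an already canonical address passes through unchanged. The sketch below is an illustrative user-space program, not kernel code; the example address is made up and it assumes gcc/clang's arithmetic right shift for signed 64-bit values.

/* User-space sketch of the sign-extend-from-bit-62 trick (illustrative). */
#include <stdio.h>
#include <stdint.h>

static uint64_t make_canonical(uint64_t addr)
{
        /* Drop bit 63, then let the arithmetic shift copy bit 62 back in. */
        return (uint64_t)((int64_t)(addr << 1) >> 1);
}

int main(void)
{
        uint64_t real  = 0xffff888012345000ULL;      /* 1:1 map style address */
        uint64_t decoy = real & ~(1ULL << 63);       /* top bit flipped off   */

        printf("%#llx -> %#llx\n", (unsigned long long)decoy,
               (unsigned long long)make_canonical(decoy));   /* restored  */
        printf("%#llx -> %#llx\n", (unsigned long long)real,
               (unsigned long long)make_canonical(real));    /* unchanged */
        return 0;
}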