diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 032c7cd3cede0f0f1e3c05b381b282a6fd34395e..834ed1145969b3bdcaafa08e3cf224d58429f586 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -48,6 +48,7 @@ show up in /proc/sys/kernel: - hyperv_record_panic_msg - kexec_load_disabled - kptr_restrict +- machine_check_safe [ arm64 only ] - l2cr [ PPC only ] - modprobe ==> Documentation/debugging-modules.txt - modules_disabled @@ -437,6 +438,27 @@ values to unprivileged users is a concern. When kptr_restrict is set to (2), kernel pointers printed using %pK will be replaced with 0's regardless of privileges. +machine_check_safe (arm64 only) +================================ + +Controls the kernel's behaviour when an hardware memory error is +encountered in the following scenarios: + += =================== +1 cow +2 copy_mc_to_kernel +3 copy_from_user +4 copy_to_user +5 get_user +6 put_user += =================== + +Correspondence between sysctl value and behavior: + += ======================= +0 Kernel panic +1 Kill related processes += ======================= l2cr: (PPC only) ================ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c1357f45a35e07300cdc5032616ceae93966a60e..39b643fe36626fbe2f58f2dfc11aacade4d5791b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -10,6 +10,7 @@ config ARM64 select ACPI_SPCR_TABLE if ACPI select ACPI_PPTT if ACPI select ARCH_CLOCKSOURCE_DATA + select ARCH_HAS_UACCESS_MCSAFE if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_COHERENT_TO_PFN @@ -21,6 +22,7 @@ config ARM64 select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV select ARCH_HAS_KEEPINITRD + select ARCH_HAS_MC_EXTABLE if ARCH_HAS_UACCESS_MCSAFE select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL @@ -998,6 +1000,9 @@ config ARCH_WANT_HUGE_PMD_SHARE config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_HAS_MC_EXTABLE + bool + config ARCH_ENABLE_SPLIT_PMD_PTLOCK def_bool y if PGTABLE_LEVELS > 2 diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 3cb3c4ab3ea562d073a82a3d64acb51e139e960c..451ce45e6c0edadcb530ff22778f32757f2f14b8 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -224,65 +224,6 @@ alternative_endif _asm_extable 9999b, \label .endm -/* - * Generate the assembly for UAO alternatives with exception table entries. - * This is complicated as there is no post-increment or pair versions of the - * unprivileged instructions, and USER() only works for single instructions. 
- */ -#ifdef CONFIG_ARM64_UAO - .macro uao_ldp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: ldp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - ldtr \reg1, [\addr]; - ldtr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; - .endm - - .macro uao_stp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: stp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - sttr \reg1, [\addr]; - sttr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; - .endm - - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: \inst \reg, [\addr], \post_inc; - nop; - alternative_else - \alt_inst \reg, [\addr]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - .endm -#else - .macro uao_ldp l, reg1, reg2, addr, post_inc - USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_stp l, reg1, reg2, addr, post_inc - USER(\l, stp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - USER(\l, \inst \reg, [\addr], \post_inc) - .endm -#endif - #endif /* __ASSEMBLY__ */ /* diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index c764cc8fb3b6abc734eb3865a99142727c77727f..320d68c0e09c43ec4059f2b9c83f19c22e6a626c 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -66,4 +66,68 @@ alternative_else_nop_endif and \dst, \dst, \addr .endm +/* + * Generate the assembly for UAO alternatives with exception table entries. + * This is complicated as there is no post-increment or pair versions of the + * unprivileged instructions, and USER() only works for single instructions. 
+ */ +#ifdef CONFIG_ARM64_UAO + .macro uao_ldp l, reg1, reg2, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: ldp \reg1, \reg2, [\addr], \post_inc; +8889: nop; + nop; + alternative_else + ldtr \reg1, [\addr]; + ldtr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + _asm_extable 8889b,\l; + + _asm_mc_extable 8888b,\l; + _asm_mc_extable 8889b,\l; + .endm + + .macro uao_stp l, reg1, reg2, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: stp \reg1, \reg2, [\addr], \post_inc; +8889: nop; + nop; + alternative_else + sttr \reg1, [\addr]; + sttr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + _asm_extable 8889b,\l; + .endm + + .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: \inst \reg, [\addr], \post_inc; + nop; + alternative_else + \alt_inst \reg, [\addr]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + + _asm_mc_extable 8888b,\l; + .endm +#else + .macro uao_ldp l, reg1, reg2, addr, post_inc + USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) + .endm + .macro uao_stp l, reg1, reg2, addr, post_inc + USER(\l, stp \reg1, \reg2, [\addr], \post_inc) + .endm + .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc + USER(\l, \inst \reg, [\addr], \post_inc) + .endm +#endif + #endif diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 4a4258f17c868f8acde2d5cfc7cdcfe325d55e82..0e1424ee75e16085245ae02006adb4f869054eed 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -142,9 +142,33 @@ alternative_endif .popsection .endm +/* + * Emit an entry into the machine check exception table + */ +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + .macro _asm_mc_extable, from, to + .pushsection __mc_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) + .popsection + .endm +#else + .macro _asm_mc_extable, from, to + .endm +#endif + #define USER(l, x...) \ 9999: x; \ - _asm_extable 9999b, l + _asm_extable 9999b, l; \ + _asm_mc_extable 9999b, l + +#define USER_MC(l, x...) \ +9999: x; \ + _asm_mc_extable 9999b, l + +#define CPY_MC(l, x...) \ +9999: x; \ + _asm_mc_extable 9999b, l /* * Register aliases. 
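
The _asm_mc_extable macro added above mirrors _asm_extable but records its entries in the separate __mc_ex_table section, using the same relative (from, to) encoding produced by ".long (\from - .), (\to - .)". As a rough C sketch of how such a relative entry is decoded back into absolute addresses (the helper names below are hypothetical and not part of this patch; the fixup calculation matches fixup_exception_mc() later in the series):

/* Assumes struct exception_table_entry { int insn, fixup; } from asm/extable.h. */
static inline unsigned long mc_ex_insn_addr(const struct exception_table_entry *e)
{
	/* address of the faulting instruction the entry was emitted for */
	return (unsigned long)&e->insn + e->insn;
}

static inline unsigned long mc_ex_fixup_addr(const struct exception_table_entry *e)
{
	/* fixup target, i.e. what fixup_exception_mc() loads into regs->pc */
	return (unsigned long)&e->fixup + e->fixup;
}
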
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 56a4f68b262efb02417943403e5dcbe7e1ef752e..96b2cdc590066ce720dba1c22b7f57f31df36112 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -23,4 +23,5 @@ struct exception_table_entry #define ARCH_HAS_RELATIVE_EXTABLE extern int fixup_exception(struct pt_regs *regs); +extern int fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index b91593452d560c8684ffd4e095e142b9acd4d251..7528801a315ffec9864357715ff3cbb8fa52d7c4 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -52,6 +52,16 @@ static inline void copy_page(void *to, const void *from) #endif extern void clear_page(void *to); +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +extern void copy_page_mc(void *to, const void *from); +void copy_highpage_mc(struct page *to, struct page *from); +#define __HAVE_ARCH_COPY_HIGHPAGE_MC + +void copy_user_highpage_mc(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC +#endif + #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 5623685c7d138556dd5268db6363ee8faeb99cd4..9ded66ffb800a4bc2eaa0607a97bcf2b14080dbd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -81,6 +81,8 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ +extern int sysctl_machine_check_safe; + #ifndef CONFIG_ARM64_FORCE_52BIT #define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\ DEFAULT_MAP_WINDOW) diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index b31e8e87a0db9945f16dce89dea40a2c091fe813..3012ea9c3a1fcb5702d10488b1fab02747a34bc4 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -35,6 +35,10 @@ extern void *memchr(const void *, int, __kernel_size_t); extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); +#define __HAVE_ARCH_MEMCPY_MC +extern unsigned long memcpy_mcs(void *, const void *, __kernel_size_t); +extern unsigned long __memcpy_mcs(void *, const void *, __kernel_size_t); + #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *, const void *, __kernel_size_t); extern void *__memmove(void *, const void *, __kernel_size_t); @@ -48,6 +52,25 @@ extern void *__memset(void *, int, __kernel_size_t); void memcpy_flushcache(void *dst, const void *src, size_t cnt); #endif +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +#define __HAVE_ARCH_MEMCPY_MCSAFE +/** + * memcpy_mcsafe - memory copy that handles source exceptions + * + * @dst: destination address + * @src: source address + * @len: number of bytes to copy + * + * Return 0 for success, or number of bytes not copied if there was an + * exception. 
+ */ +static inline unsigned long __must_check +memcpy_mcsafe(void *to, const void *from, unsigned long size) +{ + return (unsigned long)memcpy_mcs(to, from, size); +} +#endif + #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) /* @@ -56,6 +79,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt); */ #define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memcpy_mcs(dst, src, len) __memcpy_mcs(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 32fc8061aa76ffcca455ee85790058c2a6ada1ed..6bb1f51ebc5a59b7b8f69841fca3d1e46fa53aff 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -101,6 +101,21 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si " .long (" #from " - .), (" #to " - .)\n" \ " .popsection\n" +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +#define _ASM_MC_EXTABLE(from, to) \ + " .pushsection __mc_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" +#else +#define _ASM_MC_EXTABLE(from, to) +#endif + +#define _ASM_KACCESS_EXTABLE(from, to) _ASM_EXTABLE(from, to) +#define _ASM_UACCESS_EXTABLE(from, to) \ + _ASM_EXTABLE(from, to) \ + _ASM_MC_EXTABLE(from, to) + /* * User access enabling/disabling. */ @@ -252,7 +267,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) * The "__xxx_error" versions set the third argument to -EFAULT if an error * occurs, and leave it unchanged on success. */ -#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature, type) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -263,39 +278,47 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) " mov %1, #0\n" \ " b 2b\n" \ " .previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_##type##ACCESS_EXTABLE(1b, 3b) \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __raw_get_user(x, ptr, err) \ +#define __raw_get_mem(x, ptr, err, type) \ do { \ unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ case 2: \ - __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ case 4: \ - __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ case 8: \ - __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) +#define __raw_get_user(x, ptr, err) \ +do { \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + if (get_fs() == KERNEL_DS) \ + __raw_get_mem(x, ptr, err, K); \ + else \ + __raw_get_mem(x, ptr, err, U); \ + uaccess_disable_not_uao(); \ +} while (0) + #define __get_user_error(x, ptr, err) \ do { \ __typeof__(*(ptr)) 
__user *__p = (ptr); \ @@ -317,7 +340,7 @@ do { \ #define get_user __get_user -#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature, type) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -327,35 +350,43 @@ do { \ "3: mov %w0, %3\n" \ " b 2b\n" \ " .previous\n" \ - _ASM_EXTABLE(1b, 3b) \ + _ASM_##type##ACCESS_EXTABLE(1b, 3b) \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __raw_put_user(x, ptr, err) \ +#define __raw_put_mem(x, ptr, err, type) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ case 2: \ - __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ case 4: \ - __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ case 8: \ - __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm("str", "sttr", "%x", __pu_val, (ptr), \ + (err), ARM64_HAS_UAO, type); \ break; \ default: \ BUILD_BUG(); \ } \ +} while (0) + +#define __raw_put_user(x, ptr, err) \ +do { \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + if (get_fs() == KERNEL_DS) \ + __raw_put_mem(x, ptr, err, K); \ + else \ + __raw_put_mem(x, ptr, err, U); \ uaccess_disable_not_uao(); \ } while (0) @@ -444,4 +475,13 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src, } #endif +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +static inline unsigned long __must_check +copy_to_user_mcsafe(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 0bab37b1acbe98b857d56166ba497c0507c7c537..e14ad28d7ad5cbd1b59c5448f3969bd83ad9f6e2 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -145,6 +145,7 @@ SECTIONS RO_DATA(PAGE_SIZE) /* everything from this point to */ EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */ + MC_EXCEPTION_TABLE(8) NOTES . 
= ALIGN(PAGE_SIZE); diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 6b762ee451b21dcd6ddfe9e31967a54d70be789d..7451eb61d46da0bf57ebeb279a11b6771e38c07e 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 lib-y := clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o slow_copy_page.o \ - clear_page.o memchr.o memcpy.o memmove.o memset.o \ + clear_page.o memchr.o memcpy.o memcpy_mc.o memmove.o memset.o \ memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \ strchr.o strrchr.o tishift.o @@ -14,6 +14,8 @@ endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o +lib-$(CONFIG_ARCH_HAS_UACCESS_MCSAFE) += copy_page_mc.o + obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 8e25e89ad01fd7daa41065bae7e1c9745dce986b..5584e53338c40008eafdd0e3608bc5659e2d5dfa 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -20,36 +20,36 @@ * x0 - bytes not copied */ - .macro ldrb1 ptr, regB, val - uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val + .macro ldrb1 reg, ptr, val + uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val .endm - .macro strb1 ptr, regB, val - strb \ptr, [\regB], \val + .macro strb1 reg, ptr, val + USER_MC(9998f, strb \reg, [\ptr], \val) .endm - .macro ldrh1 ptr, regB, val - uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val + .macro ldrh1 reg, ptr, val + uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val .endm - .macro strh1 ptr, regB, val - strh \ptr, [\regB], \val + .macro strh1 reg, ptr, val + USER_MC(9998f, strh \reg, [\ptr], \val) .endm - .macro ldr1 ptr, regB, val - uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val + .macro ldr1 reg, ptr, val + uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val .endm - .macro str1 ptr, regB, val - str \ptr, [\regB], \val + .macro str1 reg, ptr, val + USER_MC(9998f, str \reg, [\ptr], \val) .endm - .macro ldp1 ptr, regB, regC, val - uao_ldp 9998f, \ptr, \regB, \regC, \val + .macro ldp1 reg1, reg2, ptr, val + uao_ldp 9998f, \reg1, \reg2, \ptr, \val .endm - .macro stp1 ptr, regB, regC, val - stp \ptr, \regB, [\regC], \val + .macro stp1 reg1, reg2, ptr, val + USER_MC(9998f, stp \reg1, \reg2, [\ptr], \val) .endm end .req x5 diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 667139013ed171ef4b5de1ba916941060858475c..80e37ada0ee1a501d5d4fb9fe87db9fc5853af2b 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -21,36 +21,36 @@ * Returns: * x0 - bytes not copied */ - .macro ldrb1 ptr, regB, val - uao_user_alternative 9998f, ldrb, ldtrb, \ptr, \regB, \val + .macro ldrb1 reg, ptr, val + uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val .endm - .macro strb1 ptr, regB, val - uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val + .macro strb1 reg, ptr, val + uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val .endm - .macro ldrh1 ptr, regB, val - uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val + .macro ldrh1 reg, ptr, val + uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val .endm - .macro strh1 ptr, regB, val - uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val + .macro strh1 reg, ptr, val + uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val .endm - .macro ldr1 ptr, regB, val - uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val + .macro ldr1 reg, 
ptr, val + uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val .endm - .macro str1 ptr, regB, val - uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val + .macro str1 reg, ptr, val + uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val .endm - .macro ldp1 ptr, regB, regC, val - uao_ldp 9998f, \ptr, \regB, \regC, \val + .macro ldp1 reg1, reg2, ptr, val + uao_ldp 9998f, \reg1, \reg2, \ptr, \val .endm - .macro stp1 ptr, regB, regC, val - uao_stp 9998f, \ptr, \regB, \regC, \val + .macro stp1 reg1, reg2, ptr, val + uao_stp 9998f, \reg1, \reg2, \ptr, \val .endm end .req x5 diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S new file mode 100644 index 0000000000000000000000000000000000000000..8d4b9159fa8a9479ed198e9f1ed84f0c70c9333f --- /dev/null +++ b/arch/arm64/lib/copy_page_mc.S @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include + +/* + * Copy a page from src to dest (both are page aligned) with machine check + * + * Parameters: + * x0 - dest + * x1 - src + */ +SYM_FUNC_START(copy_page_mc) +alternative_if ARM64_HAS_NO_HW_PREFETCH + // Prefetch three cache lines ahead. + prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #256 + add x1, x1, #128 +1: + tst x0, #(PAGE_SIZE - 1) + +alternative_if ARM64_HAS_NO_HW_PREFETCH + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #128 + add x1, x1, #128 + + b.ne 1b + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) + +9998: ret + +SYM_FUNC_END(copy_page_mc) +EXPORT_SYMBOL(copy_page_mc) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 1a104d0089f3a4036574bb6d1c5ac9796740dc5b..2b1a3ef0e4a042adf686cb972f39348259ca7435 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -19,36 +19,36 @@ * Returns: * x0 - bytes not copied */ - .macro ldrb1 ptr, regB, val - ldrb \ptr, [\regB], \val + .macro ldrb1 reg, ptr, val + USER_MC(9998f, ldrb \reg, [\ptr], \val) .endm - .macro strb1 ptr, regB, val - uao_user_alternative 9998f, strb, sttrb, \ptr, \regB, \val + .macro strb1 reg, ptr, val + 
uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val .endm - .macro ldrh1 ptr, regB, val - ldrh \ptr, [\regB], \val + .macro ldrh1 reg, ptr, val + USER_MC(9998f, ldrh \reg, [\ptr], \val) .endm - .macro strh1 ptr, regB, val - uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val + .macro strh1 reg, ptr, val + uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val .endm - .macro ldr1 ptr, regB, val - ldr \ptr, [\regB], \val + .macro ldr1 reg, ptr, val + USER_MC(9998f, ldr \reg, [\ptr], \val) .endm - .macro str1 ptr, regB, val - uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val + .macro str1 reg, ptr, val + uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val .endm - .macro ldp1 ptr, regB, regC, val - ldp \ptr, \regB, [\regC], \val + .macro ldp1 reg1, reg2, ptr, val + USER_MC(9998f, ldp \reg1, \reg2, [\ptr], \val) .endm - .macro stp1 ptr, regB, regC, val - uao_stp 9998f, \ptr, \regB, \regC, \val + .macro stp1 reg1, reg2, ptr, val + uao_stp 9998f, \reg1, \reg2, \ptr, \val .endm end .req x5 diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S index b03cbb3455d4da23413dc91b468efa79396261b7..dc8d2a216a6e647e93972687207f212f3ddeb6b3 100644 --- a/arch/arm64/lib/memcpy.S +++ b/arch/arm64/lib/memcpy.S @@ -24,36 +24,36 @@ * Returns: * x0 - dest */ - .macro ldrb1 ptr, regB, val - ldrb \ptr, [\regB], \val + .macro ldrb1 reg, ptr, val + ldrb \reg, [\ptr], \val .endm - .macro strb1 ptr, regB, val - strb \ptr, [\regB], \val + .macro strb1 reg, ptr, val + strb \reg, [\ptr], \val .endm - .macro ldrh1 ptr, regB, val - ldrh \ptr, [\regB], \val + .macro ldrh1 reg, ptr, val + ldrh \reg, [\ptr], \val .endm - .macro strh1 ptr, regB, val - strh \ptr, [\regB], \val + .macro strh1 reg, ptr, val + strh \reg, [\ptr], \val .endm - .macro ldr1 ptr, regB, val - ldr \ptr, [\regB], \val + .macro ldr1 reg, ptr, val + ldr \reg, [\ptr], \val .endm - .macro str1 ptr, regB, val - str \ptr, [\regB], \val + .macro str1 reg, ptr, val + str \reg, [\ptr], \val .endm - .macro ldp1 ptr, regB, regC, val - ldp \ptr, \regB, [\regC], \val + .macro ldp1 reg1, reg2, ptr, val + ldp \reg1, \reg2, [\ptr], \val .endm - .macro stp1 ptr, regB, regC, val - stp \ptr, \regB, [\regC], \val + .macro stp1 reg1, reg2, ptr, val + stp \reg1, \reg2, [\ptr], \val .endm SYM_FUNC_START_ALIAS(__memcpy) diff --git a/arch/arm64/lib/memcpy_mc.S b/arch/arm64/lib/memcpy_mc.S new file mode 100644 index 0000000000000000000000000000000000000000..1e76a0d1cc43a5adeb4627280808dd8a6926c75a --- /dev/null +++ b/arch/arm64/lib/memcpy_mc.S @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 ARM Ltd. + * Copyright (C) 2013 Linaro. + * + * This code is based on glibc cortex strings work originally authored by Linaro + * be found @ + * + * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/ + * files/head:/src/aarch64/ + */ + +#include +#include +#include + +/* + * Copy a buffer from src to dest (alignment handled by the hardware) + * with machine check safe. 
+ * + * Parameters: + * x0 - dest + * x1 - src + * x2 - n + * Returns: + * x0 - bytes not copied + */ + .macro ldrb1 reg, ptr, val + CPY_MC(9998f, ldrb \reg, [\ptr], \val) + .endm + + .macro strb1 reg, ptr, val + CPY_MC(9998f, strb \reg, [\ptr], \val) + .endm + + .macro ldrh1 reg, ptr, val + CPY_MC(9998f, ldrh \reg, [\ptr], \val) + .endm + + .macro strh1 reg, ptr, val + CPY_MC(9998f, strh \reg, [\ptr], \val) + .endm + + .macro ldr1 reg, ptr, val + CPY_MC(9998f, ldr \reg, [\ptr], \val) + .endm + + .macro str1 reg, ptr, val + CPY_MC(9998f, str \reg, [\ptr], \val) + .endm + + .macro ldp1 reg1, reg2, ptr, val + CPY_MC(9998f, ldp \reg1, \reg2, [\ptr], \val) + .endm + + .macro stp1 reg1, reg2, ptr, val + CPY_MC(9998f, stp \reg1, \reg2, [\ptr], \val) + .endm + +end .req x5 +SYM_FUNC_START_ALIAS(__memcpy_mcs) +SYM_FUNC_START_WEAK_PI(memcpy_mcs) + add end, x0, x2 +#include "copy_template.S" + mov x0, #0 + ret + +9998: sub x0, end, dst + ret +SYM_FUNC_END_PI(memcpy_mcs) +EXPORT_SYMBOL(memcpy_mcs) +SYM_FUNC_END_ALIAS(__memcpy_mcs) +EXPORT_SYMBOL(__memcpy_mcs) diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 7eee44d716f9309097c2c5160b23552c5f141bbf..001d38c95bb22dc692cef700fef646089e8a269f 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -34,3 +34,22 @@ void fast_copy_page_switched(const void *from, void *to) (unsigned long)to); } EXPORT_SYMBOL_GPL(fast_copy_page_switched); + +#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE +void copy_highpage_mc(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + + copy_page_mc(kto, kfrom); +} +EXPORT_SYMBOL(copy_highpage_mc); + +void copy_user_highpage_mc(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + copy_highpage_mc(to, from); + flush_dcache_page(to); +} +EXPORT_SYMBOL_GPL(copy_user_highpage_mc); +#endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 81e694af5f8c26736df04b183382d02501811a81..b6a6a9bda50494cd6d009dded0b3c31999219a0d 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -16,3 +16,15 @@ int fixup_exception(struct pt_regs *regs) return fixup != NULL; } + +int fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_mc_exception_tables(instruction_pointer(regs)); + if (!fixup) + return 0; + + regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + return 1; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6366ffee46138ee9c3b355c591e0b90462e99abc..49a6fab74c46476bb6b2c22dc2709c4835073fcf 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -40,6 +40,8 @@ #include #include +int sysctl_machine_check_safe = 1; + struct fault_info { int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); @@ -1403,6 +1405,34 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr, + struct pt_regs *regs, int sig, int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_MCSAFE)) + return false; + + if (!sysctl_machine_check_safe) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static 
int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -1422,7 +1452,9 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) siaddr = NULL; else siaddr = (void __user *)addr; - arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 4adf9ef2b0e243e268c8cd4a036dc0e24638e4be..90b55f84fd1b266a8aee920b9d9c2cde2f506206 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -41,6 +41,9 @@ #include #include #include +#ifdef CONFIG_ARM64 +#include +#endif #include #include @@ -517,6 +520,22 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata) #endif } +#ifdef CONFIG_ARM64 +/* + * A platform may describe one error source for the handling of synchronous + * errors (e.g. MCE or SEA), or for handling asynchronous errors (e.g. SCI + * or External Interrupt). On x86, the HEST notifications are always + * asynchronous, so only SEA on ARM is delivered as a synchronous + * notification. + */ +static inline bool is_hest_sync_notify(struct ghes *ghes) +{ + u8 notify_type = ghes->generic->notify.type; + + return notify_type == ACPI_HEST_NOTIFY_SEA; +} +#endif + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -526,6 +545,9 @@ static bool ghes_do_proc(struct ghes *ghes, guid_t *sec_type; const guid_t *fru_id = &guid_null; char *fru_text = ""; +#ifdef CONFIG_ARM64 + bool sync = is_hest_sync_notify(ghes); +#endif sev = ghes_severity(estatus->error_severity); apei_estatus_for_each_section(estatus, gdata) { @@ -562,6 +584,18 @@ static bool ghes_do_proc(struct ghes *ghes, } } +#ifdef CONFIG_ARM64 + /* + * If no memory failure work is queued for abnormal synchronous + * errors, do a force kill. 
+ */ + if (sync && !work_queued) { + pr_err(HW_ERR GHES_PFX "%s:%d: hardware memory corruption (SIGBUS)\n", + current->comm, task_pid_nr(current)); + force_sig(SIGBUS); + } +#endif + return work_queued; } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7ce3cfd965d259ee72190f4c6b448ecc805e9893..e0c2ba3a50baa1077131ad407c906c94f3a865ff 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2349,7 +2349,14 @@ static int elf_core_dump(struct coredump_params *cprm) page = get_dump_page(addr); if (page) { void *kaddr = kmap(page); + +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 1; +#endif stop = !dump_emit(cprm, kaddr, PAGE_SIZE); +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 0; +#endif kunmap(page); put_page(page); } else diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index d86ebd0dcc3d3063051a0f313b7f28909de6492e..166927b6d02ab06cd198a0101a5306fbd844f0c9 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1509,7 +1509,14 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm) struct page *page = get_dump_page(addr); if (page) { void *kaddr = kmap(page); + +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 1; +#endif res = dump_emit(cprm, kaddr, PAGE_SIZE); +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + current->is_coredump_mcs = 0; +#endif kunmap(page); put_page(page); } else { diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 4b52a73b591006c92d6e233a1c7d6477364ec17a..2019c5bd11cab3263986cbf1aafb8c5118c95ec2 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -605,6 +605,21 @@ __stop___ex_table = .; \ } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* + * Machine Check Exception table + */ +#define MC_EXCEPTION_TABLE(align) \ + . = ALIGN(align); \ + __mc_ex_table : AT(ADDR(__mc_ex_table) - LOAD_OFFSET) { \ + __start___mc_ex_table = .; \ + KEEP(*(__mc_ex_table)) \ + __stop___mc_ex_table = .; \ + } +#else +#define MC_EXCEPTION_TABLE(align) +#endif + /* * .BTF */ diff --git a/include/linux/extable.h b/include/linux/extable.h index 4ab9e78f313b7983865a5f6588ecfcb721fcc188..e608f8a8df4e1c04dad6d496d21a1911b470f3e5 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -19,18 +19,41 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +const struct exception_table_entry *search_mc_exception_tables(unsigned long add); +#else +static inline const struct exception_table_entry * +search_mc_exception_tables(unsigned long add) +{ + return NULL; +} +#endif const struct exception_table_entry * search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. 
*/ const struct exception_table_entry *search_module_extables(unsigned long addr); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +const struct exception_table_entry *search_module_mc_extables(unsigned long addr); +#else +static inline const struct exception_table_entry * +search_module_mc_extables(unsigned long addr) +{ + return NULL; +} +#endif #else static inline const struct exception_table_entry * search_module_extables(unsigned long addr) { return NULL; } +static inline const struct exception_table_entry * +search_module_mc_extables(unsigned long addr) +{ + return NULL; +} #endif /*CONFIG_MODULES*/ #ifdef CONFIG_BPF_JIT diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ea5cdbd8c2c326cd6f8702bfc2e6aa2e9fbca2c0..5f3e248e03c02f09a9507c386c9a08f2081b8253 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -261,6 +261,10 @@ static inline void copy_user_highpage(struct page *to, struct page *from, #endif +#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC +#define copy_user_highpage_mc copy_user_highpage +#endif + #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page *from) @@ -276,4 +280,8 @@ static inline void copy_highpage(struct page *to, struct page *from) #endif +#ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC +#define copy_highpage_mc copy_highpage +#endif + #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 78b6846003194a1c75a090358dfe5e5e54b67a51..7ab60fdf85297898a90162eb771342371c301e83 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -448,6 +448,11 @@ struct module { keeping pointers to this stuff */ char *args; +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + unsigned int num_mc_exentries; + struct exception_table_entry *mc_extable; +#endif + KABI_RESERVE(1); KABI_RESERVE(2); KABI_RESERVE(3); diff --git a/include/linux/sched.h b/include/linux/sched.h index d7999c06015fadb40b9b25822fadd6e141917491..f6ab69c525a7b2a775273d7d269321b909692c29 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1300,6 +1300,11 @@ struct task_struct { */ randomized_struct_fields_end +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + /* Task coredump support machine check safe */ + unsigned long is_coredump_mcs; +#endif + KABI_RESERVE(1); KABI_RESERVE(2); KABI_RESERVE(3); diff --git a/kernel/extable.c b/kernel/extable.c index f6920a11e28a5500e0606bbdd74b626cb477db07..3a45a0e37b489524324e7d7c95f049ddd2249f64 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -28,6 +28,11 @@ DEFINE_MUTEX(text_mutex); extern struct exception_table_entry __start___ex_table[]; extern struct exception_table_entry __stop___ex_table[]; +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +extern struct exception_table_entry __start___mc_ex_table[]; +extern struct exception_table_entry __stop___mc_ex_table[]; +#endif + /* Cleared by build time tools if the table is already sorted. 
*/ u32 __initdata __visible main_extable_sort_needed = 1; @@ -38,6 +43,14 @@ void __init sort_main_extable(void) pr_notice("Sorting __ex_table...\n"); sort_extable(__start___ex_table, __stop___ex_table); } + +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + if (main_extable_sort_needed && + &__stop___mc_ex_table > &__start___mc_ex_table) { + pr_notice("Sorting __mc_ex_table...\n"); + sort_extable(__start___mc_ex_table, __stop___mc_ex_table); + } +#endif } /* Given an address, look for it in the kernel exception table */ @@ -61,6 +74,22 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) return e; } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* Given an address, look for it in the machine check exception table */ +const +struct exception_table_entry *search_mc_exception_tables(unsigned long addr) +{ + const struct exception_table_entry *e; + + e = search_extable(__start___mc_ex_table, + __stop___mc_ex_table - __start___mc_ex_table, addr); + if (!e) + e = search_module_mc_extables(addr); + + return e; +} +#endif + int init_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_sinittext && diff --git a/kernel/module.c b/kernel/module.c index 13471a694d43cf948e59d6a3af987488fc782981..4b6f47e7ef6d662d6dff99f07f08f2795ed58e16 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3418,6 +3418,11 @@ static int find_module_sections(struct module *mod, struct load_info *info) mod->extable = section_objs(info, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + mod->mc_extable = section_objs(info, "__mc_ex_table", + sizeof(*mod->mc_extable), &mod->num_mc_exentries); +#endif + if (section_addr(info, "__obsparm")) pr_warn("%s: Ignoring obsolete parameters\n", mod->name); @@ -3658,6 +3663,10 @@ static int post_relocation(struct module *mod, const struct load_info *info) /* Sort exception table now relocations are done. */ sort_extable(mod->extable, mod->extable + mod->num_exentries); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + sort_extable(mod->mc_extable, mod->mc_extable + mod->num_mc_exentries); +#endif + /* Copy relocated percpu area over. */ percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, info->sechdrs[info->index.pcpu].sh_size); @@ -4622,6 +4631,35 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) return e; } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* Given an address, look for it in the module machine check safe exception tables. */ +const struct exception_table_entry *search_module_mc_extables(unsigned long addr) +{ + const struct exception_table_entry *e = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (!mod) + goto out; + + if (!mod->num_mc_exentries) + goto out; + + e = search_extable(mod->mc_extable, + mod->num_mc_exentries, + addr); +out: + preempt_enable(); + + /* + * Now, if we found one, we are running inside it now, hence + * we cannot unload the module, hence no refcnt needed. + */ + return e; +} +#endif + /* * is_module_address - is this address inside a module? * @addr: the address to check. 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 45183289a51ebfa839f6fd21e66adaff0ac73d32..f6dcc10ad3e91ec18c4d4d60d6930b1fdedeaa39 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2304,6 +2304,17 @@ static struct ctl_table debug_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, +#endif +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + { + .procname = "machine_check_safe", + .data = &sysctl_machine_check_safe, + .maxlen = sizeof(sysctl_machine_check_safe), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, #endif { } }; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 9ea6f7bb830959de00d005588ede7d495143cb4d..abe251587ec97cadb4f65e60cbd63e1d293e0b9d 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -736,6 +736,16 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i) EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe); #endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */ +static void *memcpy_iter(void *to, const void *from, __kernel_size_t size) +{ +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_UACCESS_MCSAFE) + if (current->is_coredump_mcs) + return (void *)memcpy_mcsafe(to, from, size); + else +#endif + return memcpy(to, from, size); +} + size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { char *to = addr; @@ -749,7 +759,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy_iter((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) ) return bytes; @@ -979,7 +989,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page, copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len), memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page, v.bv_offset, v.bv_len), - memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) + memcpy_iter((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len) ) kunmap_atomic(kaddr); return bytes; diff --git a/mm/memory.c b/mm/memory.c index c55d3659112bb78073b942318efaf7511b877fb6..f9fa380d36771cda3ab5b0defa3735799ff07ed4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2172,7 +2172,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, debug_dma_assert_idle(src); if (likely(src)) { - copy_user_highpage(dst, src, addr, vma); + copy_user_highpage_mc(dst, src, addr, vma); return true; }
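
memcpy_mcsafe() follows the convention documented in the asm/string.h hunk above: it returns 0 on success, or the number of bytes left uncopied when a machine-check fixup fires mid-copy. A minimal, hypothetical caller sketch (this function is illustrative only and is not added by the patch):

/* Copy from memory that may contain an uncorrected error; returns bytes read. */
static size_t read_maybe_poisoned(void *dst, const void *src, size_t len)
{
	unsigned long rem = memcpy_mcsafe(dst, src, len);	/* 0 on success */

	if (rem)
		return len - rem;	/* short copy: a machine check was consumed */
	return len;
}
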