From 12d42e0d13886ca8342cc03a1a18f3c575b10fbc Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:06 +0800
Subject: [PATCH 01/37] livepatch: Fix patching functions which have static_call

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60L10
CVE: NA

--------------------------------

It was reported that if 'static_call' is used in an old function, then
the livepatch module created by kpatch for that old function cannot be
inserted normally. The root cause is that relocation of static_call
symbols in the livepatch module has not yet been done when static_call
is initialized:

  load_module
    prepare_coming_module
      blocking_notifier_call_chain_robust
        notifier_call_chain_robust
          static_call_module_notify  <-- 1. static_call symbols are initialized here, but relocation is only done below at mark "2."
    do_init_module
      do_one_initcall
        klp_register_patch
          klp_init_patch
            klp_init_object
              klp_init_object_loaded  <-- 2. relocate .klp.xxx here

To solve this, move the static_call initialization after the relocation.

Signed-off-by: Zheng Yejian
---
 kernel/livepatch/core.c | 19 +++++++++++++++++++
 kernel/static_call.c    | 19 +++++++++++++++++++
 2 files changed, 38 insertions(+)

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index f6981faa18a8..338c2624de0e 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1192,6 +1192,12 @@ static void klp_init_patch_early(struct klp_patch *patch)
 	}
 }
 
+#if defined(CONFIG_HAVE_STATIC_CALL_INLINE)
+extern int klp_static_call_register(struct module *mod);
+#else
+static inline int klp_static_call_register(struct module *mod) { return 0; }
+#endif
+
 static int klp_init_patch(struct klp_patch *patch)
 {
 	struct klp_object *obj;
@@ -1223,6 +1229,19 @@ static int klp_init_patch(struct klp_patch *patch)
 		pr_err("register jump label failed, ret=%d\n", ret);
 		return ret;
 	}
+	ret = klp_static_call_register(patch->mod);
+	if (ret) {
+		/*
+		 * There is no need to separately clean the pre-registered
+		 * jump_label here because it will be cleaned up along this path:
+		 *   load_module
+		 *     do_init_module
+		 *       fail_free_freeinit: <-- notify GOING here
+		 */
+		module_enable_ro(patch->mod, true);
+		pr_err("register static call failed, ret=%d\n", ret);
+		return ret;
+	}
 	module_enable_ro(patch->mod, true);
 
 #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
diff --git a/kernel/static_call.c b/kernel/static_call.c
index 43ba0b1e0edb..d38f6a92e3e4 100644
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -356,6 +356,9 @@ static int static_call_add_module(struct module *mod)
 	struct static_call_site *stop = start + mod->num_static_call_sites;
 	struct static_call_site *site;
 
+	if (unlikely(!mod_klp_rel_completed(mod)))
+		return 0;
+
 	for (site = start; site != stop; site++) {
 		unsigned long s_key = __static_call_key(site);
 		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
@@ -398,6 +401,9 @@ static void static_call_del_module(struct module *mod)
 	struct static_call_mod *site_mod, **prev;
 	struct static_call_site *site;
 
+	if (unlikely(!mod_klp_rel_completed(mod)))
+		return;
+
 	for (site = start; site < stop; site++) {
 		key = static_call_key(site);
 		if (key == prev_key)
@@ -450,8 +456,21 @@ static struct notifier_block static_call_module_nb = {
 	.notifier_call = static_call_module_notify,
};
 
+int klp_static_call_register(struct module *mod)
+{
+	int ret;
+
+	ret = static_call_module_notify(&static_call_module_nb, MODULE_STATE_COMING, mod);
+	return notifier_to_errno(ret);
+}
+
 #else
 
+int klp_static_call_register(struct module *mod)
+{
+	return 0;
+}
+
 static inline int
__static_call_mod_text_reserved(void *start, void *end)
 {
 	return 0;

--
Gitee

From f751eb54126647e97dcb7e3dfde9a3b7e8e0c32c Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:07 +0800
Subject: [PATCH 02/37] livepatch/x86: Rename old_code to old_insns

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60L10
CVE: NA

--------------------------------

In arm/arm64/ppc32/ppc64, this field is named old_insns, so unify the
naming.

Signed-off-by: Zheng Yejian
---
 arch/x86/include/asm/livepatch.h | 2 +-
 arch/x86/kernel/livepatch.c      | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index b510f935ec11..dbcf69b9c4cb 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -36,7 +36,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable);
 #define JMP_E9_INSN_SIZE 5
 
 struct arch_klp_data {
-	unsigned char old_code[JMP_E9_INSN_SIZE];
+	unsigned char old_insns[JMP_E9_INSN_SIZE];
 #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
 	/*
 	 * Saved opcode at the entry of the old func (which maybe replaced
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index d134169488b6..5488bf014637 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -483,7 +483,7 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
 
 	/* Prevent text modification */
 	mutex_lock(&text_mutex);
-	ret = copy_from_kernel_nofault(arch_data->old_code,
+	ret = copy_from_kernel_nofault(arch_data->old_insns,
 			old_func, JMP_E9_INSN_SIZE);
 	mutex_unlock(&text_mutex);
 
@@ -525,7 +525,7 @@ void arch_klp_unpatch_func(struct klp_func *func)
 	ip = (unsigned long)func_node->old_func;
 	list_del_rcu(&func->stack_node);
 	if (list_empty(&func_node->func_stack)) {
-		new = func_node->arch_data.old_code;
+		new = func_node->arch_data.old_insns;
 	} else {
 		next_func = list_first_or_null_rcu(&func_node->func_stack,
 					struct klp_func, stack_node);

--
Gitee

From 7a05719fd64b15b8185d0078189d6880e770804b Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:08 +0800
Subject: [PATCH 03/37] livepatch/core: Restrict minimum size of function that can be patched

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60L10
CVE: NA

--------------------------------

If a function is patched, the instructions at its beginning are
modified into 'jump codes' which jump to the new function. This
requires that the function be big enough; otherwise the modification
may run past the end of the function.

Currently each architecture needs to implement
arch_klp_func_can_patch() to check the function size. However, this
has the following problems:
1. x86 does not implement arch_klp_func_can_patch();
2. in the arm64 and ppc32 implementations, the function size is
   checked only when a long jump is required. So a very short function
   may be patched successfully at first, but as more kernel modules
   are loaded, a long jump may someday become necessary, and then the
   function can no longer be patched;
3. the implementations largely duplicate one another.

In this patch, introduce the macro KLP_MAX_REPLACE_SIZE to denote the
maximum size that will be replaced on patching, then move the check
ahead into klp_init_object_loaded().
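As a rough illustration (this sketch only mirrors the hunks below and
is not itself a patch hunk; KLP_MAX_REPLACE_SIZE is taken from each
architecture's old_insns field), the centralized check amounts to:

	/* Arch header: the number of bytes patching may overwrite. */
	#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns)

	/*
	 * In klp_init_object_loaded(): reject functions that cannot even
	 * hold the jump code, regardless of whether a long jump is
	 * currently required.
	 */
	if (func->old_size < KLP_MAX_REPLACE_SIZE) {
		pr_err("%s size less than limit\n", func->old_name);
		return -EINVAL;
	}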
Fixes: c33e42836a74 ("livepatch/core: Allow implementation without ftrace") Signed-off-by: Zheng Yejian --- arch/arm/include/asm/livepatch.h | 2 ++ arch/arm/kernel/livepatch.c | 25 ------------------------- arch/arm64/include/asm/livepatch.h | 2 ++ arch/arm64/kernel/livepatch.c | 25 ------------------------- arch/powerpc/include/asm/livepatch.h | 2 ++ arch/powerpc/kernel/livepatch_32.c | 18 ------------------ arch/powerpc/kernel/livepatch_64.c | 15 --------------- arch/x86/include/asm/livepatch.h | 2 ++ kernel/livepatch/core.c | 15 ++++++--------- 9 files changed, 14 insertions(+), 92 deletions(-) diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h index 47d8b01618c7..445a78d83d21 100644 --- a/arch/arm/include/asm/livepatch.h +++ b/arch/arm/include/asm/livepatch.h @@ -57,6 +57,8 @@ struct arch_klp_data { u32 saved_opcode; }; +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 713ce67fa6e3..bc09f338e713 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -496,28 +496,3 @@ void arch_klp_unpatch_func(struct klp_func *func) do_patch(pc, (unsigned long)next_func->new_func); } } - -#ifdef CONFIG_ARM_MODULE_PLTS -/* return 0 if the func can be patched */ -int arch_klp_func_can_patch(struct klp_func *func) -{ - unsigned long pc = (unsigned long)func->old_func; - unsigned long new_addr = (unsigned long)func->new_func; - unsigned long old_size = func->old_size; - - if (!old_size) - return -EINVAL; - - if (!offset_in_range(pc, new_addr, SZ_32M) && - (old_size < LJMP_INSN_SIZE * ARM_INSN_SIZE)) { - pr_err("func %s size less than limit\n", func->old_name); - return -EPERM; - } - return 0; -} -#else -int arch_klp_func_can_patch(struct klp_func *func) -{ - return 0; -} -#endif /* #ifdef CONFIG_ARM_MODULE_PLTS */ diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h index bcb6c4081978..c41a22adc944 100644 --- a/arch/arm64/include/asm/livepatch.h +++ b/arch/arm64/include/asm/livepatch.h @@ -66,6 +66,8 @@ struct arch_klp_data { u32 saved_opcode; }; +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index cda56066d859..8ec09c22dc26 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -483,28 +483,3 @@ void arch_klp_unpatch_func(struct klp_func *func) do_patch(pc, (unsigned long)next_func->new_func); } } - -#ifdef CONFIG_ARM64_MODULE_PLTS -/* return 0 if the func can be patched */ -int arch_klp_func_can_patch(struct klp_func *func) -{ - unsigned long pc = (unsigned long)func->old_func; - unsigned long new_addr = (unsigned long)func->new_func; - unsigned long old_size = func->old_size; - - if ((long)old_size <= 0) - return -EINVAL; - - if (!offset_in_range(pc, new_addr, SZ_128M) && - (old_size < LJMP_INSN_SIZE * sizeof(u32))) { - pr_err("func %s size less than limit\n", func->old_name); - return -EPERM; - } - return 0; -} -#else 
-int arch_klp_func_can_patch(struct klp_func *func) -{ - return 0; -} -#endif diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index 39dcfc3c28ce..ae674ea59ab3 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -118,6 +118,8 @@ struct arch_klp_data { #endif /* CONFIG_PPC64 */ +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + struct stackframe { unsigned long sp; unsigned long pc; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 8f53386e7cf8..4eefae2f92dc 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -488,22 +488,4 @@ void arch_klp_unpatch_func(struct klp_func *func) do_patch(pc, (unsigned long)next_func->new_func); } } - -/* return 0 if the func can be patched */ -int arch_klp_func_can_patch(struct klp_func *func) -{ - unsigned long pc = (unsigned long)func->old_func; - unsigned long new_addr = (unsigned long)func->new_func; - unsigned long old_size = func->old_size; - - if (!old_size) - return -EINVAL; - - if (!offset_in_range(pc, new_addr, SZ_32M) && - (old_size < LJMP_INSN_SIZE * sizeof(u32))) { - pr_err("func %s size less than limit\n", func->old_name); - return -EPERM; - } - return 0; -} #endif diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index cbb5e02cccff..aca7361ac12b 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -491,21 +491,6 @@ void arch_klp_unpatch_func(struct klp_func *func) } } -/* return 0 if the func can be patched */ -int arch_klp_func_can_patch(struct klp_func *func) -{ - unsigned long old_size = func->old_size; - - if (!old_size) - return -EINVAL; - - if (old_size < LJMP_INSN_SIZE * sizeof(u32)) { - pr_err("func %s size less than limit\n", func->old_name); - return -EPERM; - } - return 0; -} - int arch_klp_init_func(struct klp_object *obj, struct klp_func *func) { #ifdef PPC64_ELF_ABI_v1 diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index dbcf69b9c4cb..e2cef5b2d8aa 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -46,6 +46,8 @@ struct arch_klp_data { #endif }; +#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns) + long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY int arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 338c2624de0e..485c3a70c8bc 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -938,11 +938,6 @@ void klp_free_replaced_patches_async(struct klp_patch *new_patch) } #ifdef CONFIG_LIVEPATCH_WO_FTRACE -int __weak arch_klp_func_can_patch(struct klp_func *func) -{ - return 0; -} - int __weak arch_klp_init_func(struct klp_object *obj, struct klp_func *func) { return 0; @@ -965,9 +960,6 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) else func->old_mod = NULL; #endif - ret = arch_klp_func_can_patch(func); - if (ret) - return ret; ret = arch_klp_init_func(obj, func); if (ret) @@ -1043,11 +1035,16 @@ static int klp_init_object_loaded(struct klp_patch *patch, ret = kallsyms_lookup_size_offset((unsigned long)func->old_func, &func->old_size, NULL); - if (!ret) { + if (!ret || ((long)func->old_size < 0)) { pr_err("kallsyms size lookup failed for '%s'\n", func->old_name); return 
-ENOENT;
 		}
+		if (func->old_size < KLP_MAX_REPLACE_SIZE) {
+			pr_err("%s size less than limit (%lu < %ld)\n", func->old_name,
+			       func->old_size, KLP_MAX_REPLACE_SIZE);
+			return -EINVAL;
+		}
 
 #ifdef PPC64_ELF_ABI_v1
 		/*

--
Gitee

From 9cb2978cfd1080def7a80e71194631696cf84352 Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:09 +0800
Subject: [PATCH 04/37] livepatch/x86: Avoid conflict with static {call,key}

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60L10
CVE: NA

--------------------------------

Static call and static key allow users to modify instructions at a
call site; the related configs are: CONFIG_HAVE_STATIC_CALL_INLINE for
static call, and CONFIG_JUMP_LABEL for static key.

When such a call site lies within the first several instructions of an
old function, the very instructions that livepatch may also modify, a
conflict arises. To avoid the conflict, do not allow a livepatch
module of this kind to be inserted.

Fixes: c33e42836a74 ("livepatch/core: Allow implementation without ftrace")
Signed-off-by: Zheng Yejian
---
 kernel/livepatch/core.c | 42 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 485c3a70c8bc..3f6d66c54e6d 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -34,6 +34,7 @@
 #include 
 #include 
 #endif
+#include <linux/static_call.h>
 
 /*
  * klp_mutex is a coarse lock which serializes access to klp data. All
@@ -1195,6 +1196,43 @@ extern int klp_static_call_register(struct module *mod);
 static inline int klp_static_call_register(struct module *mod) { return 0; }
 #endif
 
+static int check_address_conflict(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+	int ret;
+	void *start;
+	void *end;
+
+	/*
+	 * Locks seem to be required, as the comment of jump_label_text_reserved()
+	 * says: "Caller must hold jump_label_mutex."
+	 * But looking into the implementations of jump_label_text_reserved() and
+	 * static_call_text_reserved(), the call sites of every jump_label or
+	 * static_call are checked, and they won't change after the corresponding
+	 * module is inserted, so there is no need to take jump_label_lock and
+	 * static_call_lock here.
+ */ + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + start = func->old_func; + end = start + KLP_MAX_REPLACE_SIZE - 1; + ret = jump_label_text_reserved(start, end); + if (ret) { + pr_err("'%s' has static key in first %ld bytes, ret=%d\n", + func->old_name, KLP_MAX_REPLACE_SIZE, ret); + return -EINVAL; + } + ret = static_call_text_reserved(start, end); + if (ret) { + pr_err("'%s' has static call in first %ld bytes, ret=%d\n", + func->old_name, KLP_MAX_REPLACE_SIZE, ret); + return -EINVAL; + } + } + } + return 0; +} + static int klp_init_patch(struct klp_patch *patch) { struct klp_object *obj; @@ -1241,6 +1279,10 @@ static int klp_init_patch(struct klp_patch *patch) } module_enable_ro(patch->mod, true); + ret = check_address_conflict(patch); + if (ret) + return ret; + #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY klp_for_each_object(patch, obj) klp_load_hook(obj); -- Gitee From b6c6d6e9a87e65a44205c462474da3775b6f5fa9 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:10 +0800 Subject: [PATCH 05/37] livepatch/core: Fix compile warning when print logs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60L10 CVE: NA -------------------------------- Fix compile warnings as below: include/linux/kern_levels.h:11:18: note: in expansion of macro ‘KERN_SOH’ 11 | #define KERN_ERR KERN_SOH "3" /* error conditions */ | ^~~~~~~~ include/linux/printk.h:392:9: note: in expansion of macro ‘KERN_ERR’ 392 | printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ^~~~~~~~ kernel/livepatch/core.c:1045:4: note: in expansion of macro ‘pr_err’ 1045 | pr_err("%s size less than limit (%lu < %ld)\n", func->old_name, | ^~~~~~ Fixes: aba04b9defb0 ("[Huawei] livepatch/core: Restrict minimum size of function that can be patched") Fixes: af56c7290f29 ("[Huawei] livepatch/x86: Avoid conflict with static {call,key}") Signed-off-by: Zheng Yejian --- kernel/livepatch/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 3f6d66c54e6d..c8ef647c9cc4 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1042,7 +1042,7 @@ static int klp_init_object_loaded(struct klp_patch *patch, return -ENOENT; } if (func->old_size < KLP_MAX_REPLACE_SIZE) { - pr_err("%s size less than limit (%lu < %ld)\n", func->old_name, + pr_err("%s size less than limit (%lu < %zu)\n", func->old_name, func->old_size, KLP_MAX_REPLACE_SIZE); return -EINVAL; } @@ -1218,13 +1218,13 @@ static int check_address_conflict(struct klp_patch *patch) end = start + KLP_MAX_REPLACE_SIZE - 1; ret = jump_label_text_reserved(start, end); if (ret) { - pr_err("'%s' has static key in first %ld bytes, ret=%d\n", + pr_err("'%s' has static key in first %zu bytes, ret=%d\n", func->old_name, KLP_MAX_REPLACE_SIZE, ret); return -EINVAL; } ret = static_call_text_reserved(start, end); if (ret) { - pr_err("'%s' has static call in first %ld bytes, ret=%d\n", + pr_err("'%s' has static call in first %zu bytes, ret=%d\n", func->old_name, KLP_MAX_REPLACE_SIZE, ret); return -EINVAL; } -- Gitee From 010f535838659f04e199fc5e72c15ce80208b1db Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:11 +0800 Subject: [PATCH 06/37] livepatch: Fix warning C_RULE_ID_MAGICNUMBER hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60MKD CVE: NA -------------------------------- CodeCheck reports that: 
Do not use magic numbers. The number is 10.

The original patch in another branch introduced a problem which was
fixed by commit 7cfd1003915a ("[Huawei] livepatch/arm64: Fix migration
thread check"). In this patch, that fix is merged in directly.

Signed-off-by: Zheng Yejian
---
 arch/arm/kernel/livepatch.c        | 11 +----------
 arch/arm64/kernel/livepatch.c      | 11 +----------
 arch/powerpc/kernel/livepatch_32.c | 11 +----------
 arch/powerpc/kernel/livepatch_64.c | 11 +----------
 arch/x86/kernel/livepatch.c        |  2 +-
 include/linux/livepatch.h          | 16 ++++++++++++++++
 6 files changed, 21 insertions(+), 41 deletions(-)

diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index bc09f338e713..b4d26474ba33 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -277,16 +277,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			frame.sp = current_stack_pointer;
 			frame.lr = (unsigned long)__builtin_return_address(0);
 			frame.pc = (unsigned long)do_check_calltrace;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
-			 */
+		} else if (klp_is_migration_thread(t->comm)) {
 			continue;
 		} else {
 			frame.fp = thread_saved_fp(t);
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 8ec09c22dc26..4c04899fa2a8 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -274,16 +274,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			/* current on this CPU */
 			frame.fp = (unsigned long)__builtin_frame_address(0);
 			frame.pc = (unsigned long)do_check_calltrace;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
-			 */
+		} else if (klp_is_migration_thread(t->comm)) {
 			continue;
 		} else {
 			frame.fp = thread_saved_fp(t);
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index 4eefae2f92dc..3b9e720983ca 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -293,16 +293,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
 			 * backtrace is so similar
 			 */
 			stack = (unsigned long *)current_stack_pointer;
-		} else if (strncmp(t->comm, "migration/", 10) == 0) {
-			/*
-			 * current on other CPU
-			 * we call this in stop_machine, so the current
-			 * of each CPUs is mirgation, just compare the
-			 * task_comm here, because we can't get the
-			 * cpu_curr(task_cpu(t))). This assumes that no
-			 * other thread will pretend to be a stopper via
-			 * task_comm.
- */ + } else if (klp_is_migration_thread(t->comm)) { continue; } else { /* diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index aca7361ac12b..8c7b44acf74e 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -314,16 +314,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, * so similar */ stack = (unsigned long *)current_stack_pointer; - } else if (strncmp(t->comm, "migration/", 10) == 0) { - /* - * current on other CPU - * we call this in stop_machine, so the current - * of each CPUs is mirgation, just compare the - * task_comm here, because we can't get the - * cpu_curr(task_cpu(t))). This assumes that no - * other thread will pretend to be a stopper via - * task_comm. - */ + } else if (klp_is_migration_thread(t->comm)) { continue; } else { /* diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 5488bf014637..0241e560bd2e 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -315,7 +315,7 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da #endif for_each_process_thread(g, t) { - if (!strncmp(t->comm, "migration/", 10)) + if (klp_is_migration_thread(t->comm)) continue; #ifdef CONFIG_ARCH_STACKWALK diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9301f8e9bb90..56ad1c1dd83e 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -316,6 +316,22 @@ static inline bool klp_have_reliable_stack(void) { return true; } #define klp_smp_isb() #endif +#define KLP_MIGRATION_NAME_PREFIX "migration/" +static inline bool klp_is_migration_thread(const char *task_name) +{ + /* + * current on other CPU + * we call this in stop_machine, so the current + * of each CPUs is migration, just compare the + * task_comm here, because we can't get the + * cpu_curr(task_cpu(t))). This assumes that no + * other thread will pretend to be a stopper via + * task_comm. + */ + return !strncmp(task_name, KLP_MIGRATION_NAME_PREFIX, + sizeof(KLP_MIGRATION_NAME_PREFIX) - 1); +} + #endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ #else /* !CONFIG_LIVEPATCH */ -- Gitee From c696be3653e295166c8804f9bdc750532d0b3176 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:12 +0800 Subject: [PATCH 07/37] livepatch: Fix warning C_RULE_ID_PRINT_NUMBER_DONT_USE_BRACKET hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60MKD CVE: NA -------------------------------- CodeCheck reports that: Do not use parentheses when printing numbers. 
Signed-off-by: Zheng Yejian --- arch/arm64/kernel/livepatch.c | 4 ++-- arch/powerpc/kernel/livepatch_32.c | 4 ++-- arch/powerpc/kernel/livepatch_64.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 4c04899fa2a8..6b5bcb491125 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -416,7 +416,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr) for (i = 0; i < LJMP_INSN_SIZE; i++) { ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]); if (ret) { - pr_err("patch instruction(%d) large range failed, ret=%d\n", + pr_err("patch instruction %d large range failed, ret=%d\n", i, ret); return -EPERM; } @@ -462,7 +462,7 @@ void arch_klp_unpatch_func(struct klp_func *func) ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, func_node->arch_data.old_insns[i]); if (ret) { - pr_err("restore instruction(%d) failed, ret=%d\n", i, ret); + pr_err("restore instruction %d failed, ret=%d\n", i, ret); return; } } diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 3b9e720983ca..7b4ed23bf2ca 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -431,7 +431,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr) ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), ppc_inst(insns[i])); if (ret) { - pr_err("patch instruction(%d) large range failed, ret=%d\n", + pr_err("patch instruction %d large range failed, ret=%d\n", i, ret); return -EPERM; } @@ -469,7 +469,7 @@ void arch_klp_unpatch_func(struct klp_func *func) ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), ppc_inst(func_node->arch_data.old_insns[i])); if (ret) { - pr_err("restore instruction(%d) failed, ret=%d\n", i, ret); + pr_err("restore instruction %d failed, ret=%d\n", i, ret); return; } } diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 8c7b44acf74e..416f9f03d747 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -467,7 +467,7 @@ void arch_klp_unpatch_func(struct klp_func *func) ret = patch_instruction((struct ppc_inst *)((u32 *)pc + i), ppc_inst(func_node->arch_data.old_insns[i])); if (ret) { - pr_err("restore instruction(%d) failed, ret=%d\n", i, ret); + pr_err("restore instruction %d failed, ret=%d\n", i, ret); break; } } -- Gitee From 5aa8a3b22f38a3c0096b4dff12f5d6bd8a507ac1 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:13 +0800 Subject: [PATCH 08/37] livepatch/core: Fix warning C_RULE_ID_SINGLE_BRANCH_IF_AND_LOOP_BRACKET hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60MKD CVE: NA -------------------------------- Fix warning that single branch not need bracket. Signed-off-by: Zheng Yejian --- kernel/livepatch/core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index c8ef647c9cc4..4cbc1a7d092d 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1054,10 +1054,9 @@ static int klp_init_object_loaded(struct klp_patch *patch, * feature 'function descriptor'), otherwise size found by * 'kallsyms_lookup_size_offset' may be abnormal. 
*/ - if (func->old_name[0] != '.') { + if (func->old_name[0] != '.') pr_warn("old_name '%s' may miss the prefix '.', old_size=%lu\n", func->old_name, func->old_size); - } #endif if (func->nop) -- Gitee From b1bfc1832a8a93d41ec2877e447114232b4f804f Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:14 +0800 Subject: [PATCH 09/37] livepatch: Fix warning C_RULE_ID_BLANK_LINE hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60MKD CVE: NA -------------------------------- CodeCheck reports that: Do not add blank lines on the start of a code block defined by braces. Signed-off-by: Zheng Yejian --- kernel/livepatch/core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 4cbc1a7d092d..9e65f6ae4061 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1564,7 +1564,6 @@ static int klp_mem_prepare(struct klp_patch *patch) static void remove_breakpoint(struct klp_func *func, bool restore) { - struct klp_func_node *func_node = klp_find_func_node(func->old_func); struct arch_klp_data *arch_data = &func_node->arch_data; -- Gitee From fa53b53de48b4310b9a49e9bfeb7b58fd97c0264 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Wed, 16 Nov 2022 14:47:15 +0800 Subject: [PATCH 10/37] livepatch: Avoid CPU hogging with cond_resched mainline inclusion from mainline-v5.17-rc1 commit f5bdb34bf0c9314548f2d8e2360b703ff3610303 category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60MYE CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f5bdb34bf0c9314548f2d8e2360b703ff3610303 -------------------------------- When initializing a 'struct klp_object' in klp_init_object_loaded(), and performing relocations in klp_resolve_symbols(), klp_find_object_symbol() is invoked to look up the address of a symbol in an already-loaded module (or vmlinux). This, in turn, calls kallsyms_on_each_symbol() or module_kallsyms_on_each_symbol() to find the address of the symbol that is being patched. It turns out that symbol lookups often take up the most CPU time when enabling and disabling a patch, and may hog the CPU and cause other tasks on that CPU's runqueue to starve -- even in paths where interrupts are enabled. For example, under certain workloads, enabling a KLP patch with many objects or functions may cause ksoftirqd to be starved, and thus for interrupts to be backlogged and delayed. This may end up causing TCP retransmits on the host where the KLP patch is being applied, and in general, may cause any interrupts serviced by softirqd to be delayed while the patch is being applied. So as to ensure that kallsyms_on_each_symbol() does not end up hogging the CPU, this patch adds a call to cond_resched() in kallsyms_on_each_symbol() and module_kallsyms_on_each_symbol(), which are invoked when doing a symbol lookup in vmlinux and a module respectively. Without this patch, if a live-patch is applied on a 36-core Intel host with heavy TCP traffic, a ~10x spike is observed in TCP retransmits while the patch is being applied. Additionally, collecting sched events with perf indicates that ksoftirqd is awakened ~1.3 seconds before it's eventually scheduled. With the patch, no increase in TCP retransmit events is observed, and ksoftirqd is scheduled shortly after it's awakened. 
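The pattern, in brief (an illustrative sketch of the hunks that
follow, not additional code): a long symbol walk runs in process
context, so it can offer to reschedule on every iteration:

	for (i = 0; i < kallsyms_num_syms; i++) {
		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
		cond_resched();	/* yield so runnable tasks are not starved */
	}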
Signed-off-by: David Vernet
Acked-by: Miroslav Benes
Acked-by: Song Liu
Signed-off-by: Petr Mladek
Link: https://lore.kernel.org/r/20211229215646.830451-1-void@manifault.com
Signed-off-by: Zheng Yejian
---
 kernel/kallsyms.c | 1 +
 kernel/module.c   | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index fe9de067771c..c6738525fe11 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -191,6 +191,7 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
 		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
 		if (ret != 0)
 			return ret;
+		cond_resched();
 	}
 	return module_kallsyms_on_each_symbol(fn, data);
 }
diff --git a/kernel/module.c b/kernel/module.c
index cfa3d8c370a8..00aabcd30e4e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4484,6 +4484,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 				 mod, kallsyms_symbol_value(sym));
 			if (ret != 0)
 				return ret;
+
+			cond_resched();
 		}
 	}
 	return 0;

--
Gitee

From 61722bec216c04d1b3e8bebda1b2d4261176703c Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:16 +0800
Subject: [PATCH 11/37] livepatch/ppc64: Fix preemption check when enabling

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I60N44
CVE: NA

--------------------------------

A misspelling of 'CONFIG_PREEMPTION' may cause the old function not to
be checked, which can result in a still-running function being
livepatched.

Fixes: 20106abf1e74 ("livepatch: Check whole stack when CONFIG_PREEMPT is set")
Signed-off-by: Zheng Yejian
---
 arch/powerpc/kernel/livepatch_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index 416f9f03d747..a2ec7c8c1bad 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -174,7 +174,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 					 * excution of instructions to be repalced is
 					 * complete.
 					 */
-					if (IS_ENABLED(CONFIG_PREEMTION) ||
+					if (IS_ENABLED(CONFIG_PREEMPTION) ||
					    (func->force == KLP_NORMAL_FORCE) ||
					    check_jump_insn(func_addr)) {
						ret = add_func_to_list(check_funcs, &pcheck,

--
Gitee

From 1dfbdcb866f66d4242524338c036a33a2c0f540a Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:17 +0800
Subject: [PATCH 12/37] livepatch: Move 'struct klp_func_list' out of arch

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ
CVE: NA

--------------------------------

We need an interface that checks the calltrace of a given task to find
out whether the task is running in an old function that is about to be
patched. Since klp_check_calltrace() does a similar thing, except that
it checks all tasks, we can refactor it slightly to extract a function
like klp_check_task_calltrace().
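For illustration, the per-task interface extracted later might look
roughly like this (a hypothetical sketch based only on the description
above; this preparation patch itself merely moves 'struct
klp_func_list' out of the arch code):

	/*
	 * Hypothetical: check one task's stack against the collected
	 * list of patched functions, instead of iterating all tasks.
	 */
	int klp_check_task_calltrace(struct task_struct *t,
				     bool (*fn)(void *, int *, unsigned long),
				     void *data);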
Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 8 -------- arch/arm64/kernel/livepatch.c | 8 -------- arch/powerpc/kernel/livepatch_32.c | 8 -------- arch/powerpc/kernel/livepatch_64.c | 8 -------- arch/x86/kernel/livepatch.c | 8 -------- include/linux/livepatch.h | 7 +++++++ 6 files changed, 7 insertions(+), 40 deletions(-) diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index b4d26474ba33..4a5366edade5 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -64,14 +64,6 @@ static bool is_jump_insn(u32 insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 6b5bcb491125..e4070c55b8ed 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -57,14 +57,6 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr, ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \ ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800)) -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 7b4ed23bf2ca..52356621934b 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -54,14 +54,6 @@ static bool is_jump_insn(u32 insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index a2ec7c8c1bad..f2b40da2e120 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -59,14 +59,6 @@ static bool is_jump_insn(u32 insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 0241e560bd2e..d34f618f2809 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -52,14 +52,6 @@ static bool is_jump_insn(u8 *insn) return false; } -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - static inline unsigned long klp_size_to_check(unsigned long func_size, int force) { diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 56ad1c1dd83e..23fb19d74311 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -259,6 +259,13 @@ int klp_compare_address(unsigned long pc, unsigned long func_addr, void arch_klp_init(void); int klp_module_delete_safety_check(struct module *mod); +struct klp_func_list { + struct klp_func_list *next; + unsigned long func_addr; + unsigned long func_size; + const char *func_name; + int force; +}; #endif int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, -- Gitee From 4b45945282012671c18d90498a3cdad45ccf7d24 Mon 
Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:18 +0800 Subject: [PATCH 13/37] livepatch/x86: Move 'struct klp_func_list' related codes out of arch hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- First move 'struct klp_func_list' related codes into 'kernel/livepatch/core.c', preparing for subsequent reducing same code in other arch. Signed-off-by: Zheng Yejian --- arch/x86/include/asm/livepatch.h | 5 -- arch/x86/kernel/livepatch.c | 96 ++++---------------------------- include/linux/livepatch.h | 5 ++ kernel/livepatch/core.c | 88 ++++++++++++++++++++++++++++- 4 files changed, 102 insertions(+), 92 deletions(-) diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index e2cef5b2d8aa..5ffd1de9ce48 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -26,11 +26,6 @@ int arch_klp_patch_func(struct klp_func *func); void arch_klp_unpatch_func(struct klp_func *func); #endif -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif - - #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \ defined(CONFIG_LIVEPATCH_WO_FTRACE) diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index d34f618f2809..d3a1597f4811 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -52,16 +52,6 @@ static bool is_jump_insn(u8 *insn) return false; } -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > JMP_E9_INSN_SIZE) - size = JMP_E9_INSN_SIZE; - return size; -} - static bool check_jump_insn(unsigned long func_addr) { int len = JMP_E9_INSN_SIZE; @@ -82,32 +72,8 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) +int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, + klp_add_func_t add_func, struct klp_func_list **func_list) { int ret; struct klp_object *obj; @@ -156,7 +122,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, if (IS_ENABLED(CONFIG_PREEMPTION) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, func->force); if (ret) @@ -189,7 +155,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func_to_list(check_funcs, &pcheck, func_addr, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -197,7 +163,7 @@ 
static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -238,21 +204,6 @@ static void klp_print_stack_trace(void *trace_ptr, int trace_len) #endif #define MAX_STACK_ENTRIES 100 -static bool check_func_list(void *data, int *ret, unsigned long pc) -{ - struct klp_func_list *funcs = (struct klp_func_list *)data; - - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - static int klp_check_stack(void *trace_ptr, int trace_len, bool (*fn)(void *, int *, unsigned long), void *data) { @@ -284,17 +235,6 @@ static int klp_check_stack(void *trace_ptr, int trace_len, return 0; } -static void free_list(struct klp_func_list **funcs) -{ - struct klp_func_list *p; - - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); - } -} - static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) { struct task_struct *g, *t; @@ -343,27 +283,6 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) -{ - int ret = 0; - struct klp_func_list *check_funcs = NULL; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - - if (!check_funcs) - goto out; - - ret = do_check_calltrace(check_func_list, (void *)check_funcs); - -out: - free_list(&check_funcs); - return ret; -} - static bool check_module_calltrace(void *data, int *ret, unsigned long pc) { struct module *mod = (struct module *)data; @@ -376,6 +295,11 @@ static bool check_module_calltrace(void *data, int *ret, unsigned long pc) return true; } +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) +{ + return do_check_calltrace(check_func, data); +} + int arch_klp_module_check_calltrace(void *data) { return do_check_calltrace(check_module_calltrace, data); diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 23fb19d74311..b4cf90c03d29 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -266,6 +266,11 @@ struct klp_func_list { const char *func_name; int force; }; + +typedef int (*klp_add_func_t)(struct klp_func_list **funcs, struct klp_func_list **func, + unsigned long func_addr, unsigned long func_size, + const char *func_name, int force); + #endif int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 9e65f6ae4061..382bb4a73bce 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1325,11 +1325,97 @@ static int __klp_disable_patch(struct klp_patch *patch) return 0; } #elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) -int __weak klp_check_calltrace(struct klp_patch *patch, int enable) +int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + return -EINVAL; +} + +int __weak arch_klp_check_activeness_func(struct klp_patch *patch, int enable, + klp_add_func_t add_func, + struct klp_func_list **func_list) +{ + return -EINVAL; +} + +static inline unsigned long 
klp_size_to_check(unsigned long func_size,
+						    int force)
+{
+	unsigned long size = func_size;
+
+	if (force == KLP_STACK_OPTIMIZE && size > KLP_MAX_REPLACE_SIZE)
+		size = KLP_MAX_REPLACE_SIZE;
+	return size;
+}
+
+static bool check_func_list(void *data, int *ret, unsigned long pc)
+{
+	struct klp_func_list *funcs = (struct klp_func_list *)data;
+
+	while (funcs != NULL) {
+		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+				klp_size_to_check(funcs->func_size, funcs->force));
+		if (*ret)
+			return false;
+		funcs = funcs->next;
+	}
+	return true;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+		unsigned long func_addr, unsigned long func_size, const char *func_name,
+		int force)
+{
+	if (*func == NULL) {
+		*funcs = kzalloc(sizeof(**funcs), GFP_ATOMIC);
+		if (!(*funcs))
+			return -ENOMEM;
+		*func = *funcs;
+	} else {
+		(*func)->next = kzalloc(sizeof(**funcs), GFP_ATOMIC);
+		if (!(*func)->next)
+			return -ENOMEM;
+		*func = (*func)->next;
+	}
+	(*func)->func_addr = func_addr;
+	(*func)->func_size = func_size;
+	(*func)->func_name = func_name;
+	(*func)->force = force;
+	(*func)->next = NULL;
 	return 0;
 }
 
+static void free_func_list(struct klp_func_list **funcs)
+{
+	struct klp_func_list *p;
+
+	while (*funcs != NULL) {
+		p = *funcs;
+		*funcs = (*funcs)->next;
+		kfree(p);
+	}
+}
+
+int __weak klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+	int ret = 0;
+	struct klp_func_list *func_list = NULL;
+
+	ret = arch_klp_check_activeness_func(patch, enable, add_func_to_list, &func_list);
+	if (ret) {
+		pr_err("collect active functions failed, ret=%d\n", ret);
+		goto out;
+	}
+
+	if (!func_list)
+		goto out;
+
+	ret = arch_klp_check_calltrace(check_func_list, (void *)func_list);
+
+out:
+	free_func_list(&func_list);
+	return ret;
+}
+
 static LIST_HEAD(klp_func_list);
 
 /*

--
Gitee

From dd16d296d08ea26e47b19e68da869eca10c72a01 Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:19 +0800
Subject: [PATCH 14/37] livepatch/arm: Remove duplicate 'struct klp_func_list' related codes

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ
CVE: NA

--------------------------------

Now there is a weak implementation of 'klp_check_calltrace' which does
the same as the one in arch/arm, so directly clean up the arm-specific
'klp_check_calltrace'.
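The cleanup relies on the linker's weak-symbol rule (an illustrative
sketch, not a hunk of this patch): the generic copy added to
kernel/livepatch/core.c is declared __weak, so an architecture that
still defines a strong klp_check_calltrace() overrides it, while an
architecture without one, such as arm after this patch, falls back to
the generic implementation:

	/* Generic fallback in kernel/livepatch/core.c. */
	int __weak klp_check_calltrace(struct klp_patch *patch, int enable);

	/*
	 * Any non-weak arch definition of the same symbol wins at link
	 * time; arm no longer needs to provide one.
	 */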
Signed-off-by: Zheng Yejian --- arch/arm/include/asm/livepatch.h | 6 -- arch/arm/kernel/livepatch.c | 116 ++++++------------------------- 2 files changed, 21 insertions(+), 101 deletions(-) diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h index 445a78d83d21..08ff5246f97d 100644 --- a/arch/arm/include/asm/livepatch.h +++ b/arch/arm/include/asm/livepatch.h @@ -34,11 +34,6 @@ struct klp_func; int arch_klp_patch_func(struct klp_func *func); void arch_klp_unpatch_func(struct klp_func *func); -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif - - #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) #ifdef CONFIG_ARM_MODULE_PLTS @@ -63,7 +58,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); int arch_klp_module_check_calltrace(void *data); - #endif #endif /* _ASM_ARM_LIVEPATCH_H */ diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 4a5366edade5..78fbac5ecaa9 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -39,7 +39,6 @@ #define ARM_INSN_SIZE 4 #endif -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY @@ -65,22 +64,11 @@ static bool is_jump_insn(u32 insn) } struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; + void *data; int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); }; -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -95,32 +83,8 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) +int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, + klp_add_func_t add_func, struct klp_func_list **func_list) { int ret; struct klp_object *obj; @@ -171,7 +135,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, if (IS_ENABLED(CONFIG_PREEMPTION) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, func->force); if (ret) @@ -206,7 +170,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - 
ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) @@ -214,7 +178,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, #endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) @@ -225,36 +189,11 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, return 0; } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return true; - } - funcs = funcs->next; - } - return false; -} - -static int klp_check_jump_func(struct stackframe *frame, void *data) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; + struct walk_stackframe_args *args = ws_args; - return check_func_list(check_funcs, &args->ret, frame->pc); -} - -static void free_list(struct klp_func_list **funcs) -{ - struct klp_func_list *p; - - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); - } + return !args->check_func(args->data, &args->ret, frame->pc); } static int do_check_calltrace(struct walk_stackframe_args *args, @@ -287,37 +226,24 @@ static int do_check_calltrace(struct walk_stackframe_args *args, return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { - .enable = enable, - .ret = 0 + .data = data, + .ret = 0, + .check_func = check_func, }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; - - args.check_funcs = check_funcs; - ret = do_check_calltrace(&args, klp_check_jump_func); - -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static int check_module_calltrace(struct stackframe *frame, void *data) +static int check_module_calltrace(struct stackframe *frame, void *ws_args) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; - if (within_module_core(frame->pc, args->mod)) { - pr_err("module %s is in use!\n", args->mod->name); + if (within_module_core(frame->pc, mod)) { + pr_err("module %s is in use!\n", mod->name); return (args->ret = -EBUSY); } return 0; @@ -326,7 +252,7 @@ static int check_module_calltrace(struct stackframe *frame, void *data) int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; -- Gitee From b1aa0e6fafa31ea8594499d470484f7c080e744b Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:20 +0800 Subject: [PATCH 15/37] livepatch/arm64: Remove duplicate 'struct klp_func_list' related codes hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Now we get a weak type 'klp_check_calltrace' of which implementation is the same as it 
is in arch/arm64, so directly clean up the arm64-specific
'klp_check_calltrace'.

Signed-off-by: Zheng Yejian
---
 arch/arm64/include/asm/livepatch.h |   4 -
 arch/arm64/kernel/livepatch.c      | 115 ++++++-----------------------
 2 files changed, 21 insertions(+), 98 deletions(-)

diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h
index c41a22adc944..2bacd12e46b1 100644
--- a/arch/arm64/include/asm/livepatch.h
+++ b/arch/arm64/include/asm/livepatch.h
@@ -41,9 +41,6 @@ static inline int klp_check_compiler_support(void)
 int arch_klp_patch_func(struct klp_func *func);
 void arch_klp_unpatch_func(struct klp_func *func);
 
-#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
-int klp_check_calltrace(struct klp_patch *patch, int enable);
-#endif
 #else
 #error Live patching support is disabled; check CONFIG_LIVEPATCH
 #endif
@@ -72,7 +69,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func);
 int arch_klp_module_check_calltrace(void *data);
-
 #endif
 
 #endif /* _ASM_ARM64_LIVEPATCH_H */
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index e4070c55b8ed..4517f21950c5 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -35,7 +35,6 @@
 #include 
 #include 
 
-#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
 #define CHECK_JUMP_RANGE LJMP_INSN_SIZE
 
 static inline bool offset_in_range(unsigned long pc, unsigned long addr,
@@ -58,22 +57,11 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr,
	 ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
 
 struct walk_stackframe_args {
-	int enable;
-	struct klp_func_list *check_funcs;
-	struct module *mod;
+	void *data;
 	int ret;
+	bool (*check_func)(void *data, int *ret, unsigned long pc);
 };
 
-static inline unsigned long klp_size_to_check(unsigned long func_size,
-		int force)
-{
-	unsigned long size = func_size;
-
-	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
-		size = MAX_SIZE_TO_CHECK;
-	return size;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	unsigned long i;
@@ -88,32 +76,8 @@ static bool check_jump_insn(unsigned long func_addr)
 	return false;
 }
 
-static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
-		unsigned long func_addr, unsigned long func_size, const char *func_name,
-		int force)
-{
-	if (*func == NULL) {
-		*funcs = (struct klp_func_list *)kzalloc(sizeof(**funcs), GFP_ATOMIC);
-		if (!(*funcs))
-			return -ENOMEM;
-		*func = *funcs;
-	} else {
-		(*func)->next = (struct klp_func_list *)kzalloc(sizeof(**funcs),
-				GFP_ATOMIC);
-		if (!(*func)->next)
-			return -ENOMEM;
-		*func = (*func)->next;
-	}
-	(*func)->func_addr = func_addr;
-	(*func)->func_size = func_size;
-	(*func)->func_name = func_name;
-	(*func)->force = force;
-	(*func)->next = NULL;
-	return 0;
-}
-
-static int klp_check_activeness_func(struct klp_patch *patch, int enable,
-		struct klp_func_list **check_funcs)
+int arch_klp_check_activeness_func(struct klp_patch *patch, int enable,
+		klp_add_func_t add_func, struct klp_func_list **func_list)
 {
 	int ret;
 	struct klp_object *obj;
@@ -160,7 +124,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
			    if (IS_ENABLED(CONFIG_PREEMPTION) ||
				(func->force == KLP_NORMAL_FORCE) ||
				check_jump_insn(func_addr)) {
-					ret = add_func_to_list(check_funcs, &pcheck,
+					ret = add_func(func_list, &pcheck,
							func_addr,
func_size, func->old_name, func->force); if (ret) @@ -196,7 +160,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) @@ -205,7 +169,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) @@ -216,36 +180,11 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, return 0; } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static bool klp_check_jump_func(void *ws_args, unsigned long pc) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - -static bool klp_check_jump_func(void *data, unsigned long pc) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; - - return check_func_list(check_funcs, &args->ret, pc); -} - -static void free_list(struct klp_func_list **funcs) -{ - struct klp_func_list *p; + struct walk_stackframe_args *args = ws_args; - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); - } + return args->check_func(args->data, &args->ret, pc); } static int do_check_calltrace(struct walk_stackframe_args *args, @@ -283,36 +222,24 @@ static int do_check_calltrace(struct walk_stackframe_args *args, return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { - .enable = enable, - .ret = 0 + .data = data, + .ret = 0, + .check_func = check_func, }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; - - args.check_funcs = check_funcs; - ret = do_check_calltrace(&args, klp_check_jump_func); -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static bool check_module_calltrace(void *data, unsigned long pc) +static bool check_module_calltrace(void *ws_args, unsigned long pc) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; - if (within_module_core(pc, args->mod)) { - pr_err("module %s is in use!\n", args->mod->name); + if (within_module_core(pc, mod)) { + pr_err("module %s is in use!\n", mod->name); args->ret = -EBUSY; return false; } @@ -322,7 +249,7 @@ static bool check_module_calltrace(void *data, unsigned long pc) int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; -- Gitee From fd1524843bb47c2428d5e200f0d28c775a580fc0 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:21 +0800 Subject: [PATCH 16/37] livepatch/ppc32: Remove duplicate 'struct klp_func_list' related codes hulk inclusion category: feature bugzilla: 
https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Now there is a weak implementation of 'klp_check_calltrace' which is the same as the one in arch/ppc32, so directly clean up the ppc32-specific 'klp_check_calltrace'. Signed-off-by: Zheng Yejian --- arch/powerpc/include/asm/livepatch.h | 5 -- arch/powerpc/kernel/livepatch_32.c | 119 ++++++--------------------- 2 files changed, 23 insertions(+), 101 deletions(-) diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index ae674ea59ab3..f68567bb0a6b 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -139,11 +139,6 @@ int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame); #endif /* CONFIG_LIVEPATCH_FTRACE */ -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY -struct klp_patch; -int klp_check_calltrace(struct klp_patch *patch, int enable); -#endif - static inline void klp_init_thread_info(struct task_struct *p) { /* + 1 to account for STACK_END_MAGIC */ diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 52356621934b..8a3093cb2330 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -31,7 +31,6 @@ #if defined (CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \ defined (CONFIG_LIVEPATCH_WO_FTRACE) -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32)) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE #endif @@ -55,22 +54,11 @@ static bool is_jump_insn(u32 insn) } struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; + void *data; int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); }; -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -85,32 +73,8 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) +int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, + klp_add_func_t add_func, struct klp_func_list **func_list) { int ret; struct klp_object *obj; @@ -161,7 +125,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, if (IS_ENABLED(CONFIG_PREEMPTION) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, func->force); if (ret) @@ -196,14 +160,14 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size =
prev->new_size; } - ret = add_func_to_list(check_funcs, &pcheck, func_addr, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; #endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -228,46 +192,21 @@ void notrace klp_walk_stackframe(struct stackframe *frame, } } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - -static int klp_check_jump_func(struct stackframe *frame, void *data) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; + struct walk_stackframe_args *args = ws_args; /* check the PC first */ - if (!check_func_list(check_funcs, &args->ret, frame->pc)) + if (!args->check_func(args->data, &args->ret, frame->pc)) return args->ret; /* check NIP when the exception stack switching */ - if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip)) + if (frame->nip && !args->check_func(args->data, &args->ret, frame->nip)) return args->ret; return 0; } -static void free_list(struct klp_func_list **funcs) -{ - struct klp_func_list *p; - - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); - } -} - static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { @@ -314,53 +253,41 @@ static int do_check_calltrace(struct walk_stackframe_args *args, return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { - .ret = 0 + .data = data, + .ret = 0, + .check_func = check_func, }; - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; - - args.check_funcs = check_funcs; - ret = do_check_calltrace(&args, klp_check_jump_func); - -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static int check_module_calltrace(struct stackframe *frame, void *data) +static int check_module_calltrace(struct stackframe *frame, void *ws_args) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; /* check the PC first */ - if (within_module_core(frame->pc, args->mod)) + if (within_module_core(frame->pc, mod)) goto err_out; /* check NIP when the exception stack switching */ - if (frame->nip && within_module_core(frame->nip, args->mod)) + if (frame->nip && within_module_core(frame->nip, mod)) goto err_out; return 0; err_out: - pr_err("module %s is in use!\n", args->mod->name); + pr_err("module %s is in use!\n", mod->name); return (args->ret = -EBUSY); } int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; -- Gitee From 2cc3a8f4eb43c0e9f3007686fde0e1e89547bf35 Mon Sep 17 00:00:00 
2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:22 +0800 Subject: [PATCH 17/37] livepatch/ppc64: Remove duplicate 'struct klp_func_list' related codes hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Now there is a weak implementation of 'klp_check_calltrace' which is the same as the one in arch/ppc64, so directly clean up the ppc64-specific 'klp_check_calltrace'. Signed-off-by: Zheng Yejian --- arch/powerpc/kernel/livepatch_64.c | 127 ++++++----------------------- kernel/livepatch/core.c | 2 +- 2 files changed, 27 insertions(+), 102 deletions(-) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index f2b40da2e120..3b5f8b54669d 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -36,7 +36,6 @@ #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \ defined(CONFIG_LIVEPATCH_WO_FTRACE) -#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32)) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE #endif @@ -60,22 +59,11 @@ static bool is_jump_insn(u32 insn) } struct walk_stackframe_args { - int enable; - struct klp_func_list *check_funcs; - struct module *mod; + void *data; int ret; + bool (*check_func)(void *data, int *ret, unsigned long pc); }; -static inline unsigned long klp_size_to_check(unsigned long func_size, - int force) -{ - unsigned long size = func_size; - - if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK) - size = MAX_SIZE_TO_CHECK; - return size; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -90,32 +78,8 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) -{ - if (*func == NULL) { - *funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs), - GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; - return 0; -} - -static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **check_funcs) +int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, + klp_add_func_t add_func, struct klp_func_list **func_list) { int ret; struct klp_object *obj; @@ -169,7 +133,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, if (IS_ENABLED(CONFIG_PREEMPTION) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func_to_list(check_funcs, &pcheck, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, func->force); if (ret) @@ -183,7 +147,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = ppc_function_entry( (void *)func->new_func); func_size = func->new_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, + ret = add_func(func_list, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -209,7 +173,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, */ func_addr = (unsigned long)func->old_func; func_size = func->old_size; - ret = add_func_to_list(check_funcs, &pcheck, func_addr, +
ret = add_func(func_list, &pcheck, func_addr, func_size, "OLD_FUNC", 0); if (ret) return ret; @@ -221,7 +185,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)&func_node->arch_data.trampoline; func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func_to_list(check_funcs, &pcheck, func_addr, + ret = add_func(func_list, &pcheck, func_addr, func_size, "trampoline", 0); if (ret) return ret; @@ -247,46 +211,21 @@ static void notrace klp_walk_stackframe(struct stackframe *frame, } } -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); - if (*ret) { - return false; - } - funcs = funcs->next; - } - return true; -} - -static int klp_check_jump_func(struct stackframe *frame, void *data) -{ - struct walk_stackframe_args *args = data; - struct klp_func_list *check_funcs = args->check_funcs; + struct walk_stackframe_args *args = ws_args; /* check the PC first */ - if (!check_func_list(check_funcs, &args->ret, frame->pc)) + if (!args->check_func(args->data, &args->ret, frame->pc)) return args->ret; /* check NIP when the exception stack switching */ - if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip)) + if (frame->nip && !args->check_func(args->data, &args->ret, frame->nip)) return args->ret; return 0; } -static void free_list(struct klp_func_list **funcs) -{ - struct klp_func_list *p; - - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); - } -} - static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { @@ -327,8 +266,6 @@ static int do_check_calltrace(struct walk_stackframe_args *args, frame.nip = 0; klp_walk_stackframe(&frame, fn, t, args); if (args->ret) { - pr_debug("%s FAILED when %s\n", __func__, - args->enable ? 
"enabling" : "disabling"); pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); show_stack(t, NULL, KERN_INFO); return args->ret; @@ -337,53 +274,41 @@ static int do_check_calltrace(struct walk_stackframe_args *args, return 0; } -int klp_check_calltrace(struct klp_patch *patch, int enable) +int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - if (!check_funcs) - goto out; - - args.check_funcs = check_funcs; - args.ret = 0; - args.enable = enable; - ret = do_check_calltrace(&args, klp_check_jump_func); + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; -out: - free_list(&check_funcs); - return ret; + return do_check_calltrace(&args, klp_check_jump_func); } -static int check_module_calltrace(struct stackframe *frame, void *data) +static int check_module_calltrace(struct stackframe *frame, void *ws_args) { - struct walk_stackframe_args *args = data; + struct walk_stackframe_args *args = ws_args; + struct module *mod = args->data; /* check the PC first */ - if (within_module_core(frame->pc, args->mod)) + if (within_module_core(frame->pc, mod)) goto err_out; /* check NIP when the exception stack switching */ - if (frame->nip && within_module_core(frame->nip, args->mod)) + if (frame->nip && within_module_core(frame->nip, mod)) goto err_out; return 0; err_out: - pr_err("module %s is in use!\n", args->mod->name); + pr_err("module %s is in use!\n", mod->name); return (args->ret = -EBUSY); } int arch_klp_module_check_calltrace(void *data) { struct walk_stackframe_args args = { - .mod = (struct module *)data, + .data = data, .ret = 0 }; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 382bb4a73bce..b74472a48409 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1395,7 +1395,7 @@ static void free_func_list(struct klp_func_list **funcs) } } -int __weak klp_check_calltrace(struct klp_patch *patch, int enable) +static int klp_check_calltrace(struct klp_patch *patch, int enable) { int ret = 0; struct klp_func_list *func_list = NULL; -- Gitee From 1eda147bb2f36f6943fe7a531d31ac97c3a68221 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:23 +0800 Subject: [PATCH 18/37] livepatch/x86: Implement arch_klp_check_task_calltrace() hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Abstract arch_klp_check_task_calltrace() which check calltrace of a certain task, this is a prepare for later optimization. 
Signed-off-by: Zheng Yejian --- arch/x86/kernel/livepatch.c | 80 +++++++++++++++++++++++-------------- 1 file changed, 51 insertions(+), 29 deletions(-) diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index d3a1597f4811..6d036d9ac095 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -235,9 +235,10 @@ static int klp_check_stack(void *trace_ptr, int trace_len, return 0; } -static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +static int check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) { - struct task_struct *g, *t; int ret = 0; static unsigned long trace_entries[MAX_STACK_ENTRIES]; #ifdef CONFIG_ARCH_STACKWALK @@ -246,38 +247,48 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da struct stack_trace trace; #endif +#ifdef CONFIG_ARCH_STACKWALK + ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); + if (ret < 0) { + pr_err("%s:%d has an unreliable stack, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + trace_len = ret; + ret = klp_check_stack(trace_entries, trace_len, fn, data); +#else + trace.skip = 0; + trace.nr_entries = 0; + trace.max_entries = MAX_STACK_ENTRIES; + trace.entries = trace_entries; + ret = save_stack_trace_tsk_reliable(t, &trace); + if (ret) { + pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", + __func__, t->comm, t->pid, ret); + return ret; + } + ret = klp_check_stack(&trace, 0, fn, data); +#endif + if (ret) { + pr_err("%s:%d check stack failed, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + return 0; +} + +static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) +{ + int ret = 0; + struct task_struct *g, *t; + for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) continue; -#ifdef CONFIG_ARCH_STACKWALK - ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); - if (ret < 0) { - pr_err("%s:%d has an unreliable stack, ret=%d\n", - t->comm, t->pid, ret); - return ret; - } - trace_len = ret; - ret = klp_check_stack(trace_entries, trace_len, fn, data); -#else - trace.skip = 0; - trace.nr_entries = 0; - trace.max_entries = MAX_STACK_ENTRIES; - trace.entries = trace_entries; - ret = save_stack_trace_tsk_reliable(t, &trace); - WARN_ON_ONCE(ret == -ENOSYS); - if (ret) { - pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", - __func__, t->comm, t->pid, ret); + ret = check_task_calltrace(t, fn, data); + if (ret) return ret; - } - ret = klp_check_stack(&trace, 0, fn, data); -#endif - if (ret) { - pr_err("%s:%d check stack failed, ret=%d\n", - t->comm, t->pid, ret); - return ret; - } } return 0; @@ -295,6 +306,17 @@ static bool check_module_calltrace(void *data, int *ret, unsigned long pc) return true; } +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) +{ + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, fn, data); +} +#endif + int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { return do_check_calltrace(check_func, data); -- Gitee From 6c5672b70480f3c43820999cd98d07706f6f6ae4 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:24 +0800 Subject: [PATCH 19/37] livepatch/arm: Implement arch_klp_check_task_calltrace() hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA 
-------------------------------- Abstract arch_klp_check_task_calltrace(), which checks the calltrace of a given task; this prepares for a later optimization. Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 67 ++++++++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 78fbac5ecaa9..683cb43e435f 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -196,36 +196,65 @@ static int klp_check_jump_func(struct stackframe *frame, void *ws_args) return !args->check_func(args->data, &args->ret, frame->pc); } +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) +{ + struct stackframe frame; + + if (t == current) { + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_stack_pointer; + frame.lr = (unsigned long)__builtin_return_address(0); + frame.pc = (unsigned long)check_task_calltrace; + } else { + frame.fp = thread_saved_fp(t); + frame.sp = thread_saved_sp(t); + frame.lr = 0; /* recovered from the stack */ + frame.pc = thread_saved_pc(t); + } + walk_stackframe(&frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - struct stackframe frame; for_each_process_thread(g, t) { - if (t == current) { - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_stack_pointer; - frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)do_check_calltrace; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - frame.fp = thread_saved_fp(t); - frame.sp = thread_saved_sp(t); - frame.lr = 0; /* recovered from the stack */ - frame.pc = thread_saved_pc(t); - } - walk_stackframe(&frame, fn, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; } return 0; } +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif + int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { struct walk_stackframe_args args = { -- Gitee From 4a52c68b8dda2e123adf1ef3db0c052c824a4a45 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:25 +0800 Subject: [PATCH 20/37] livepatch/arm64: Implement arch_klp_check_task_calltrace() hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Abstract arch_klp_check_task_calltrace(), which checks the calltrace of a given task; this prepares for a later optimization.
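The core of the per-task helper is how the starting frame is chosen; condensed from the arm64 diff below (a sketch, not the literal patch):

	struct stackframe frame;

	if (t == current) {
		/* current: saved FP/PC are stale, so start from this function */
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.pc = (unsigned long)check_task_calltrace;
	} else {
		/* a sleeping task: use the context saved at its last switch */
		frame.fp = thread_saved_fp(t);
		frame.pc = thread_saved_pc(t);
	}
	start_backtrace(&frame, frame.fp, frame.pc);
	walk_stackframe(t, &frame, fn, args);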
Signed-off-by: Zheng Yejian --- arch/arm64/kernel/livepatch.c | 77 ++++++++++++++++++++++++----------- 1 file changed, 53 insertions(+), 24 deletions(-) diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 4517f21950c5..53e083eed1ac 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -187,41 +187,70 @@ static bool klp_check_jump_func(void *ws_args, unsigned long pc) return args->check_func(args->data, &args->ret, pc); } +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) +{ + struct stackframe frame; + + /* + * Handle the current carefully on each CPUs, we shouldn't + * use saved FP and PC when backtrace current. It's difficult + * to backtrack other CPU currents here. But fortunately, + * all CPUs will stay in this function, so the current's + * backtrace is so similar + */ + if (t == current) { + /* current on this CPU */ + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.pc = (unsigned long)check_task_calltrace; + } else { + frame.fp = thread_saved_fp(t); + frame.pc = thread_saved_pc(t); + } + start_backtrace(&frame, frame.fp, frame.pc); + walk_stackframe(t, &frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + static int do_check_calltrace(struct walk_stackframe_args *args, bool (*fn)(void *, unsigned long)) { + int ret; struct task_struct *g, *t; - struct stackframe frame; for_each_process_thread(g, t) { - /* - * Handle the current carefully on each CPUs, we shouldn't - * use saved FP and PC when backtrace current. It's difficult - * to backtrack other CPU currents here. But fortunately, - * all CPUs will stay in this function, so the current's - * backtrace is so similar - */ - if (t == current) { - /* current on this CPU */ - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)do_check_calltrace; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - frame.fp = thread_saved_fp(t); - frame.pc = thread_saved_pc(t); - } - start_backtrace(&frame, frame.fp, frame.pc); - walk_stackframe(t, &frame, fn, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; } return 0; } +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif + int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { struct walk_stackframe_args args = { -- Gitee From 727009dc2f67c94ba9b17d1cb2e8ecf1180ec931 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:26 +0800 Subject: [PATCH 21/37] livepatch/ppc32: Implement arch_klp_check_task_calltrace() hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Abstract arch_klp_check_task_calltrace(), which checks the calltrace of a given task; this prepares for a later optimization.
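The ppc32 helper has one extra subtlety, condensed below (a sketch of the diff that follows): for a sleeping task the first stack frame is skipped, because _switch does not save lr at its normal position.

	unsigned long *stack;

	if (t == current) {
		stack = (unsigned long *)current_stack_pointer;
	} else {
		/* skip the first frame; see arch/powerpc/kernel/entry_32.S _switch */
		unsigned long s = *(unsigned long *)t->thread.ksp;

		if (!validate_sp(s, t, STACK_FRAME_OVERHEAD))
			return 0;	/* no reliable frame to check */
		stack = (unsigned long *)s;
	}
	frame.sp = (unsigned long)stack;
	frame.pc = stack[STACK_FRAME_LR_SAVE];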
Signed-off-by: Zheng Yejian --- arch/powerpc/kernel/livepatch_32.c | 99 +++++++++++++++++++----------- 1 file changed, 64 insertions(+), 35 deletions(-) diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 8a3093cb2330..7d4ab5fd4114 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -207,52 +207,81 @@ static int klp_check_jump_func(struct stackframe *frame, void *ws_args) return 0; } +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) +{ + struct stackframe frame; + unsigned long *stack; + + if (t == current) { + /* + * Handle the current carefully on each CPUs, we shouldn't + * use saved FP and PC when backtrace current. It's difficult + * to backtrack other CPU currents here. But fortunately, + * all CPUs will stay in this function, so the current's + * backtrace is so similar + */ + stack = (unsigned long *)current_stack_pointer; + } else { + /* + * Skip the first frame since it does not contain lr + * at normal position and nip is stored in the lr + * position in the second frame. + * See arch/powerpc/kernel/entry_32.S _switch . + */ + unsigned long s = *(unsigned long *)t->thread.ksp; + + if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) + return 0; + stack = (unsigned long *)s; + } + + frame.sp = (unsigned long)stack; + frame.pc = stack[STACK_FRAME_LR_SAVE]; + frame.nip = 0; + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - struct stackframe frame; - unsigned long *stack; for_each_process_thread(g, t) { - if (t == current) { - /* - * Handle the current carefully on each CPUs, we shouldn't - * use saved FP and PC when backtrace current. It's difficult - * to backtrack other CPU currents here. But fortunately, - * all CPUs will stay in this function, so the current's - * backtrace is so similar - */ - stack = (unsigned long *)current_stack_pointer; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - /* - * Skip the first frame since it does not contain lr - * at normal position and nip is stored in the lr - * position in the second frame. - * See arch/powerpc/kernel/entry_32.S _switch . 
- */ - unsigned long s = *(unsigned long *)t->thread.ksp; - - if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) - continue; - stack = (unsigned long *)s; - } - - frame.sp = (unsigned long)stack; - frame.pc = stack[STACK_FRAME_LR_SAVE]; - frame.nip = 0; - klp_walk_stackframe(&frame, fn, t, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; } return 0; } +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif + int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { struct walk_stackframe_args args = { -- Gitee From 003103d98a10d67407221a917462c4d00f9bfae4 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:27 +0800 Subject: [PATCH 22/37] livepatch/ppc64: Implement arch_klp_check_task_calltrace() hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Abstract arch_klp_check_task_calltrace(), which checks the calltrace of a given task; this prepares for a later optimization. Signed-off-by: Zheng Yejian --- arch/powerpc/kernel/livepatch_64.c | 103 ++++++++++++++++++----------- 1 file changed, 66 insertions(+), 37 deletions(-) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 3b5f8b54669d..3c412ea51dc9 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -226,54 +226,83 @@ static int klp_check_jump_func(struct stackframe *frame, void *ws_args) return 0; } +static int check_task_calltrace(struct task_struct *t, + struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) +{ + struct stackframe frame; + unsigned long *stack; + + if (t == current) { + /* + * Handle the current carefully on each CPUs, + * we shouldn't use saved FP and PC when + * backtrace current. It's difficult to + * backtrack other CPU currents here. But + * fortunately, all CPUs will stay in this + * function, so the current's backtrace is + * so similar + */ + stack = (unsigned long *)current_stack_pointer; + } else { + /* + * Skip the first frame since it does not contain lr + * at normal position and nip is stored in the lr + * position in the second frame. + * See arch/powerpc/kernel/entry_64.S _switch . + */ + unsigned long s = *(unsigned long *)t->thread.ksp; + + if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) + return 0; + stack = (unsigned long *)s; + } + + frame.sp = (unsigned long)stack; + frame.pc = stack[STACK_FRAME_LR_SAVE]; + frame.nip = 0; + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; + } + return 0; +} + static int do_check_calltrace(struct walk_stackframe_args *args, int (*fn)(struct stackframe *, void *)) { + int ret; struct task_struct *g, *t; - struct stackframe frame; - unsigned long *stack; for_each_process_thread(g, t) { - if (t == current) { - /* - * Handle the current carefully on each CPUs, - * we shouldn't use saved FP and PC when - * backtrace current.
It's difficult to - * backtrack other CPU currents here. But - * fortunately,all CPUs will stay in this - * function, so the current's backtrace is - * so similar - */ - stack = (unsigned long *)current_stack_pointer; - } else if (klp_is_migration_thread(t->comm)) { + if (klp_is_migration_thread(t->comm)) continue; - } else { - /* - * Skip the first frame since it does not contain lr - * at notmal position and nip is store ind the lr - * position in the second frame. - * See arch/powerpc/kernel/entry_64.S _switch . - */ - unsigned long s = *(unsigned long *)t->thread.ksp; - - if (!validate_sp(s, t, STACK_FRAME_OVERHEAD)) - continue; - stack = (unsigned long *)s; - } - - frame.sp = (unsigned long)stack; - frame.pc = stack[STACK_FRAME_LR_SAVE]; - frame.nip = 0; - klp_walk_stackframe(&frame, fn, t, args); - if (args->ret) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - return args->ret; - } + ret = check_task_calltrace(t, args, fn); + if (ret) + return ret; } return 0; } +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +int arch_klp_check_task_calltrace(struct task_struct *t, + bool (*check_func)(void *, int *, unsigned long), + void *data) +{ + struct walk_stackframe_args args = { + .data = data, + .ret = 0, + .check_func = check_func, + }; + + if (t == NULL) + return -EINVAL; + return check_task_calltrace(t, &args, klp_check_jump_func); +} +#endif + int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data) { struct walk_stackframe_args args = { -- Gitee From 09bf001e4eaa34fd72594398f3c520fd272f8379 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:28 +0800 Subject: [PATCH 23/37] livepatch/x86: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- In breakpoint optimization mode, steps of patching an old function are: 1. insert a 'brk' instruction at start address of the old function; 2. replace instructions except first one as instructions of corresponding place in the new function; 3. replace the first 'brk' instruction to be the first instruction of the new function; Currently above 'step 2' and 'step 3' are performed in stop_machine mode, so the order of them are not that important, that is 'step 3' can be performed before 'step 2'. But later we want to move these replace steps outside outside stop_machine mode, then 'step 2' must be performed before 'step 3'. So defaultly adjust the order as 'step 2' before 'step 3' since it also does not affect in stop_machine mode now. Signed-off-by: Zheng Yejian --- arch/x86/kernel/livepatch.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 6d036d9ac095..6c5ad1a82ab0 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -428,6 +428,21 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; } +static void klp_patch_text(void *dst, const void *src, int len) +{ + if (len <= 1) + return; + /* skip breakpoint at first */ + text_poke(dst + 1, src + 1, len - 1); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. 
+ */ + barrier(); + /* update jmp opcode */ + text_poke(dst, src, 1); +} + int arch_klp_patch_func(struct klp_func *func) { struct klp_func_node *func_node; @@ -440,15 +455,7 @@ int arch_klp_patch_func(struct klp_func *func) new_addr = (unsigned long)func->new_func; /* replace the text with the new text */ new = (unsigned char *)klp_jmp_code(ip, new_addr); -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY - /* update jmp offset */ - text_poke((void *)(ip + 1), new + 1, JMP_E9_INSN_SIZE - 1); - /* update jmp opcode */ - text_poke((void *)ip, new, 1); -#else - text_poke((void *)ip, new, JMP_E9_INSN_SIZE); -#endif - + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); return 0; } @@ -473,6 +480,6 @@ void arch_klp_unpatch_func(struct klp_func *func) } /* replace the text with the new text */ - text_poke((void *)ip, new, JMP_E9_INSN_SIZE); + klp_patch_text((void *)ip, (const void *)new, JMP_E9_INSN_SIZE); } #endif -- Gitee From f9f6aa7df2f0e4b45ebe296bc971fba76002c5f0 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:29 +0800 Subject: [PATCH 24/37] livepatch/arm: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- In breakpoint optimization mode, the steps of patching an old function are: 1. insert a 'brk' instruction at the start address of the old function; 2. replace all instructions except the first one with the instructions at the corresponding places in the new function; 3. replace the leading 'brk' instruction with the first instruction of the new function; Currently the above 'step 2' and 'step 3' are performed in stop_machine mode, so their order is not that important, that is, 'step 3' can be performed before 'step 2'. But later we want to move these replace steps outside stop_machine mode, and then 'step 2' must be performed before 'step 3'. So adjust the order to 'step 2' before 'step 3' by default, since it makes no difference in stop_machine mode now. Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 683cb43e435f..d5d94593012a 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -365,14 +365,29 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; } +static void klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + + if (len <= 0) + return; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) + __patch_text(dst + i, src[i]); + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched.
+ */ + barrier(); + __patch_text(dst, src[0]); +} + static int do_patch(unsigned long pc, unsigned long new_addr) { u32 insns[LJMP_INSN_SIZE]; if (!offset_in_range(pc, new_addr, SZ_32M)) { #ifdef CONFIG_ARM_MODULE_PLTS - int i; - /* * [0] LDR PC, [PC+8] * [4] nop * [8] new_addr_to_jump */ insns[0] = __opcode_to_mem_arm(0xe59ff000); insns[1] = __opcode_to_mem_arm(0xe320f000); insns[2] = new_addr; - for (i = 0; i < LJMP_INSN_SIZE; i++) - __patch_text(((u32 *)pc) + i, insns[i]); + klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); #else /* * When offset from 'new_addr' to 'pc' is out of SZ_32M range but * @@ -394,7 +408,7 @@ } else { insns[0] = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insns[0]); + klp_patch_text((u32 *)pc, insns, 1); } return 0; } @@ -422,11 +436,7 @@ void arch_klp_unpatch_func(struct klp_func *func) pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - int i; - - for (i = 0; i < LJMP_INSN_SIZE; i++) { - __patch_text(((u32 *)pc) + i, func_node->arch_data.old_insns[i]); - } + klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); } else { next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node); -- Gitee From 14cd5be1688f6f5fed813a58eaded58b49e08871 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:30 +0800 Subject: [PATCH 25/37] livepatch/arm64: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- In breakpoint optimization mode, the steps of patching an old function are: 1. insert a 'brk' instruction at the start address of the old function; 2. replace all instructions except the first one with the instructions at the corresponding places in the new function; 3. replace the leading 'brk' instruction with the first instruction of the new function; Currently the above 'step 2' and 'step 3' are performed in stop_machine mode, so their order is not that important, that is, 'step 3' can be performed before 'step 2'. But later we want to move these replace steps outside stop_machine mode, and then 'step 2' must be performed before 'step 3'. So adjust the order to 'step 2' before 'step 3' by default, since it makes no difference in stop_machine mode now. Signed-off-by: Zheng Yejian --- arch/arm64/kernel/livepatch.c | 47 ++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 53e083eed1ac..3805e4054d4f 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -340,6 +340,27 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; } +static int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) { + ret = aarch64_insn_patch_text_nosync(dst + i, src[i]); + if (ret) + return ret; + } + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched.
+ */ + barrier(); + return aarch64_insn_patch_text_nosync(dst, src[0]); +} + static int do_patch(unsigned long pc, unsigned long new_addr) { u32 insns[LJMP_INSN_SIZE]; @@ -348,26 +369,22 @@ static int do_patch(unsigned long pc, unsigned long new_addr) if (offset_in_range(pc, new_addr, SZ_128M)) { insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr, AARCH64_INSN_BRANCH_NOLINK); - ret = aarch64_insn_patch_text_nosync((void *)pc, insns[0]); + ret = klp_patch_text((u32 *)pc, insns, 1); if (ret) { pr_err("patch instruction small range failed, ret=%d\n", ret); return -EPERM; } } else { #ifdef CONFIG_ARM64_MODULE_PLTS - int i; insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; insns[3] = 0xd61f0200; - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]); - if (ret) { - pr_err("patch instruction %d large range failed, ret=%d\n", - i, ret); - return -EPERM; - } + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; } #else /* @@ -399,20 +416,16 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, - func_node->arch_data.old_insns[i]); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - return; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } } else { next_func = list_first_or_null_rcu(&func_node->func_stack, -- Gitee From 187b838c14ce073c3b4695060faa0666368d0901 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:31 +0800 Subject: [PATCH 26/37] livepatch/ppc32: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- In breakpoint optimization mode, the steps of patching an old function are: 1. insert a 'brk' instruction at the start address of the old function; 2. replace all instructions except the first one with the instructions at the corresponding places in the new function; 3. replace the leading 'brk' instruction with the first instruction of the new function; Currently the above 'step 2' and 'step 3' are performed in stop_machine mode, so their order is not that important, that is, 'step 3' can be performed before 'step 2'. But later we want to move these replace steps outside stop_machine mode, and then 'step 2' must be performed before 'step 3'. So adjust the order to 'step 2' before 'step 3' by default, since it makes no difference in stop_machine mode now.
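All architectures in this series now share the same ordering, which can be summarized as follows (a condensed sketch of the powerpc helper added in the diff below, annotated with the step numbers above):

	int klp_patch_text(u32 *dst, const u32 *src, int len)
	{
		int i, ret;

		if (len <= 0)
			return -EINVAL;
		/* step 2: patch everything after the leading breakpoint */
		for (i = 1; i < len; i++) {
			ret = patch_instruction((struct ppc_inst *)(dst + i),
						ppc_inst(src[i]));
			if (ret)
				return ret;
		}
		/* keep the tail writes ordered before the head write */
		barrier();
		/* step 3: replace the breakpoint with the first instruction */
		return patch_instruction((struct ppc_inst *)dst, ppc_inst(src[0]));
	}

With this ordering, a CPU entering the old function always sees either the breakpoint or a complete jump sequence, never a half-written one.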
Signed-off-by: Zheng Yejian --- arch/powerpc/include/asm/livepatch.h | 1 + arch/powerpc/kernel/livepatch.c | 22 ++++++++++++++++++++++ arch/powerpc/kernel/livepatch_32.c | 26 +++++++++----------------- 3 files changed, 32 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index f68567bb0a6b..d7bc32fb0505 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -136,6 +136,7 @@ void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); int arch_klp_module_check_calltrace(void *data); int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame); +int klp_patch_text(u32 *dst, const u32 *src, int len); #endif /* CONFIG_LIVEPATCH_FTRACE */ diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c index d568e8c8b16b..a13c853947ff 100644 --- a/arch/powerpc/kernel/livepatch.c +++ b/arch/powerpc/kernel/livepatch.c @@ -112,3 +112,25 @@ int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame) return 0; } + +int klp_patch_text(u32 *dst, const u32 *src, int len) +{ + int i; + int ret; + + if (len <= 0) + return -EINVAL; + /* skip breakpoint at first */ + for (i = 1; i < len; i++) { + ret = patch_instruction((struct ppc_inst *)(dst + i), + ppc_inst(src[i])); + if (ret) + return ret; + } + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + return patch_instruction((struct ppc_inst *)dst, ppc_inst(src[0])); +} diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 7d4ab5fd4114..0018e9c420af 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -351,7 +351,6 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) static int do_patch(unsigned long pc, unsigned long new_addr) { int ret; - int i; u32 insns[LJMP_INSN_SIZE]; if (offset_in_range(pc, new_addr, SZ_32M)) { @@ -375,14 +374,10 @@ static int do_patch(unsigned long pc, unsigned long new_addr) insns[2] = 0x7d8903a6; insns[3] = 0x4e800420; - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), - ppc_inst(insns[i])); - if (ret) { - pr_err("patch instruction %d large range failed, ret=%d\n", - i, ret); - return -EPERM; - } + ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE); + if (ret) { + pr_err("patch instruction large range failed, ret=%d\n", ret); + return -EPERM; } } return 0; @@ -406,20 +401,17 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i), - ppc_inst(func_node->arch_data.old_insns[i])); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - return; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; + } } else { next_func = list_first_or_null_rcu(&func_node->func_stack, -- Gitee From eb74e7ca63ccd050d7490e97e8afcd3d06389143 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022
14:47:32 +0800 Subject: [PATCH 27/37] livepatch/ppc64: Adjust instruction replace order for KLP_STACK_OPTIMIZE mode hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- In breakpoint optimization mode, the steps of patching an old function are: 1. insert a 'brk' instruction at the start address of the old function; 2. replace all instructions except the first one with the instructions at the corresponding places in the new function; 3. replace the leading 'brk' instruction with the first instruction of the new function; Currently the above 'step 2' and 'step 3' are performed in stop_machine mode, so their order is not that important, that is, 'step 3' can be performed before 'step 2'. But later we want to move these replace steps outside stop_machine mode, and then 'step 2' must be performed before 'step 3'. So adjust the order to 'step 2' before 'step 3' by default, since it makes no difference in stop_machine mode now. Signed-off-by: Zheng Yejian --- arch/powerpc/kernel/livepatch_64.c | 13 +++++-------- arch/powerpc/kernel/module_64.c | 29 ++++++++++++++++++++--------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 3c412ea51dc9..c4bce597b69b 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -402,20 +402,17 @@ void arch_klp_unpatch_func(struct klp_func *func) struct klp_func_node *func_node; struct klp_func *next_func; unsigned long pc; - int i; int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; list_del_rcu(&func->stack_node); if (list_empty(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) { - ret = patch_instruction((struct ppc_inst *)((u32 *)pc + i), - ppc_inst(func_node->arch_data.old_insns[i])); - if (ret) { - pr_err("restore instruction %d failed, ret=%d\n", i, ret); - break; - } + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; } pr_debug("[%s %d] restore insns at 0x%lx\n", __func__, __LINE__, pc); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index ef093691f606..d0e4581b0cf0 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -817,17 +817,18 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) * Patch jump stub to reference trampoline * without saved the old R2 and load the new R2. */ -static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, +static int livepatch_create_bstub(void *pc, unsigned long addr, struct module *me) { long reladdr; unsigned long my_r2; unsigned long stub_start, stub_end, stub_size; + struct ppc64_klp_bstub_entry entry; /* Stub uses address relative to r2. */ my_r2 = me ? me->arch.toc : kernel_toc_addr(); - reladdr = (unsigned long)entry - my_r2; + reladdr = (unsigned long)pc - my_r2; if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { pr_err("%s: Address %p of jump stub out of range of %p.\n", me ?
me->name : "kernel", @@ -839,15 +840,25 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, stub_start = ppc_function_entry((void *)livepatch_branch_stub); stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); stub_size = stub_end - stub_start; - memcpy(entry->jump, (u32 *)stub_start, stub_size); + memcpy(entry.jump, (u32 *)stub_start, stub_size); + + entry.jump[0] |= PPC_HA(reladdr); + entry.jump[1] |= PPC_LO(reladdr); + entry.magic = BRANCH_STUB_MAGIC; + entry.trampoline = addr; - entry->jump[0] |= PPC_HA(reladdr); - entry->jump[1] |= PPC_LO(reladdr); - entry->magic = BRANCH_STUB_MAGIC; - entry->trampoline = addr; + /* skip breakpoint at first */ + memcpy(pc + PPC64_INSN_SIZE, (void *)&entry + PPC64_INSN_SIZE, + sizeof(entry) - PPC64_INSN_SIZE); + /* + * Avoid compiler optimization; make sure that all instructions + * except the first breakpoint have been patched. + */ + barrier(); + memcpy(pc, (void *)&entry, PPC64_INSN_SIZE); pr_debug("Create livepatch branch stub 0x%px with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n", - (void *)entry, reladdr, my_r2, addr); + pc, reladdr, my_r2, addr); return 1; } @@ -898,7 +909,7 @@ int livepatch_create_branch(unsigned long pc, #endif /* Create stub to trampoline */ - if (!livepatch_create_bstub((struct ppc64_klp_bstub_entry *)pc, trampoline, me)) + if (!livepatch_create_bstub((void *)pc, trampoline, me)) return -EINVAL; return 0; -- Gitee From 467274e2ce0f0380910b79c50f8b213a5dbfbce2 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:33 +0800 Subject: [PATCH 28/37] livepatch/core: No stop machine in KLP_STACK_OPTIMIZE mode hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- After a 'brk' instruction has been placed at the entry of an old function, every call of that function will jump to the new function via the exception handler. So we can check the calltrace of every task to make sure no one is running in the old function without using stop_machine, and then directly replace the old function with instructions that jump to the new function. Introduce the new config LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE to do this. Signed-off-by: Zheng Yejian --- include/linux/livepatch.h | 4 + kernel/livepatch/Kconfig | 11 +++ kernel/livepatch/core.c | 202 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 208 insertions(+), 9 deletions(-) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index b4cf90c03d29..7146989b5fbc 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -321,7 +321,11 @@ static inline int klp_module_coming(struct module *mod) { return 0; } static inline void klp_module_going(struct module *mod) {} static inline bool klp_patch_pending(struct task_struct *task) { return false; } static inline void klp_update_patch_state(struct task_struct *task) {} +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +void klp_copy_process(struct task_struct *child); +#else static inline void klp_copy_process(struct task_struct *child) {} +#endif static inline bool klp_have_reliable_stack(void) { return true; } #ifndef klp_smp_isb diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index 297ca41c695e..a59cbb6506cb 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig @@ -98,5 +98,16 @@ config LIVEPATCH_RESTRICT_KPROBE We should not patch for the functions where registered with kprobe, and vice versa. Say Y here if you want to check those.
+ +config LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE + bool "No stop_machine in breakpoint optimization mode" + depends on LIVEPATCH_WO_FTRACE + default n + help + In breakpoint optimization mode, check the calltraces of + tasks in batches without using stop_machine, so as to + reduce service downtime. + Say N if you are unsure. + endmenu endif
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index b74472a48409..ebeaf50559ea 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -2018,18 +2018,185 @@ static bool klp_use_breakpoint(struct klp_patch *patch) return true; } -static int klp_breakpoint_optimize(struct klp_patch *patch) +#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE +#include +#include "../sched/sched.h" + +int __weak arch_klp_check_task_calltrace(struct task_struct *t, + bool (*fn)(void *, int *, unsigned long), + void *data) { - int ret; + return -EINVAL; +} + +/* Called from copy_process() during fork */ +void klp_copy_process(struct task_struct *child) +{ + child->patch_state = current->patch_state; +} + +static void set_tasks_patch_state(int patch_state) +{ + unsigned int cpu; + struct task_struct *g, *task; + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + task->patch_state = patch_state; + } + read_unlock(&tasklist_lock); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + task->patch_state = patch_state; + } + put_online_cpus(); +} + +static void update_patch_state(struct task_struct *task, struct klp_func_list *func_list) +{ + struct rq *rq; + struct rq_flags flags; + + if (task->patch_state == KLP_PATCHED) + return; + WARN_ON_ONCE(task->patch_state != KLP_UNPATCHED); + rq = task_rq_lock(task, &flags); + if (task_running(rq, task) && task != current) + goto done; + if (arch_klp_check_task_calltrace(task, check_func_list, (void *)func_list)) + goto done; + task->patch_state = KLP_PATCHED; +done: + task_rq_unlock(rq, task, &flags); +} + +#ifdef CONFIG_SMP +static void check_task_calltrace_ipi(void *func_list) +{ + if (current->patch_state == KLP_PATCHED) + return; + if (arch_klp_check_task_calltrace(current, check_func_list, func_list)) + return; + current->patch_state = KLP_PATCHED; +} + +static void update_patch_state_ipi(struct klp_func_list *func_list) +{ + unsigned int cpu; + unsigned int curr_cpu; + + preempt_disable(); + curr_cpu = smp_processor_id(); + for_each_online_cpu(cpu) { + if (cpu == curr_cpu) + continue; + smp_call_function_single(cpu, check_task_calltrace_ipi, func_list, 1); + } + preempt_enable(); +} +#endif + +static void update_tasks_patch_state(struct klp_func_list *func_list) +{ + unsigned int cpu; + struct task_struct *g, *task; + + read_lock(&tasklist_lock); + for_each_process_thread(g, task) + update_patch_state(task, func_list); + read_unlock(&tasklist_lock); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + if (cpu_online(cpu)) { + update_patch_state(task, func_list); + } else if (task->patch_state != KLP_PATCHED) { + /* offline idle tasks can be directly updated */ + task->patch_state = KLP_PATCHED; + } + } + put_online_cpus(); +#ifdef CONFIG_SMP + update_patch_state_ipi(func_list); +#endif +} + +static bool is_patchable(void) +{ + unsigned int cpu; + struct task_struct *g, *task; + int patchable = true; + + get_online_cpus(); + for_each_possible_cpu(cpu) { + task = idle_task(cpu); + WARN_ON_ONCE(task->patch_state == KLP_UNDEFINED); + if (task->patch_state != KLP_PATCHED) { + put_online_cpus(); + return false; + } + } + put_online_cpus(); +
read_lock(&tasklist_lock); + for_each_process_thread(g, task) { + WARN_ON_ONCE(task->patch_state == KLP_UNDEFINED); + if (task->patch_state != KLP_PATCHED) { + patchable = false; + goto out_unlock; + } + } +out_unlock: + read_unlock(&tasklist_lock); + return patchable; +} + +static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) +{ + struct klp_func_list *func_list = NULL; + int ret = -EINVAL; int i; - int cnt = 0; + int retry_cnt = 0; - ret = klp_add_breakpoint(patch); + ret = arch_klp_check_activeness_func(patch, true, add_func_to_list, &func_list); if (ret) { - pr_err("failed to add breakpoints, ret=%d\n", ret); - return ret; + pr_err("break optimize collecting active functions failed, ret=%d\n", ret); + goto out; } + set_tasks_patch_state(KLP_UNPATCHED); + + for (i = 0; i < KLP_RETRY_COUNT; i++) { + retry_cnt++; + + update_tasks_patch_state(func_list); + if (is_patchable()) { + arch_klp_code_modify_prepare(); + ret = enable_patch(patch, true); + arch_klp_code_modify_post_process(); + break; + } + ret = -EAGAIN; + pr_notice("try again in %d ms\n", KLP_RETRY_INTERVAL); + msleep(KLP_RETRY_INTERVAL); + } + set_tasks_patch_state(KLP_UNDEFINED); +out: + free_func_list(&func_list); + *cnt = retry_cnt; + return ret; +} + +#else /* !CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE */ + +static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) +{ + int ret = -EINVAL; + int i; + int retry_cnt = 0; + for (i = 0; i < KLP_RETRY_COUNT; i++) { struct patch_data patch_data = { .patch = patch, @@ -2040,7 +2207,7 @@ static int klp_breakpoint_optimize(struct klp_patch *patch) if (i == KLP_RETRY_COUNT - 1) patch_data.rollback = true; - cnt++; + retry_cnt++; arch_klp_code_modify_prepare(); ret = stop_machine(klp_try_enable_patch, &patch_data, @@ -2049,13 +2216,30 @@ static int klp_breakpoint_optimize(struct klp_patch *patch) if (!ret || ret != -EAGAIN) break; - pr_notice("try again in %d ms.\n", KLP_RETRY_INTERVAL); + pr_notice("try again in %d ms\n", KLP_RETRY_INTERVAL); msleep(KLP_RETRY_INTERVAL); } + *cnt = retry_cnt; + return ret; +} +#endif /* CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE */ + +static int klp_breakpoint_optimize(struct klp_patch *patch) +{ + int ret; + int cnt = 0; + + ret = klp_add_breakpoint(patch); + if (ret) { + pr_err("failed to add breakpoints, ret=%d\n", ret); + return ret; + } + + ret = klp_breakpoint_enable_patch(patch, &cnt); + pr_notice("patching %s, tried %d times, ret=%d.\n", ret ? "failed" : "success", cnt, ret); - /* * If the patch is enabled successfully, the breakpoint instruction * has been replaced with the jump instruction. However, if the patch
-- Gitee From c5b202bd638b67cfe6a06627bc19723bf2d13b98 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:34 +0800 Subject: [PATCH 29/37] livepatch: Complete check calltrace for running tasks hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Previously, task calltrace checking was assumed to be performed under stop_machine, where none of the checked tasks can be running. So there was one case where the old function did not need to be checked, namely: preemption is disabled, the 'force' field is not KLP_NORMAL_FORCE, and there are no 'call' instructions in the livepatch replacement area. But when breakpoint optimization is used without stop_machine, tasks may be running, so we cannot skip that check.
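For illustration, after this change the enable-path condition in each arch_klp_check_activeness_func() reads roughly as follows (a sketch assembled from the hunks below; the surrounding per-arch context varies slightly):

	if (IS_ENABLED(CONFIG_PREEMPTION) ||
	    IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) ||
	    (func->force == KLP_NORMAL_FORCE) ||
	    check_jump_insn(func_addr)) {
		/* old function must be searched for on every task's stack */
		ret = add_func(func_list, &pcheck, func_addr, func_size,
			       func->old_name, func->force);
		if (ret)
			return ret;
	}

That is, the "short function, no jump instruction, preemption off" shortcut is bypassed whenever the no-stop_machine mode is configured, because tasks may still be running inside the patched region.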
Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 1 + arch/arm64/kernel/livepatch.c | 1 + arch/powerpc/kernel/livepatch_32.c | 1 + arch/powerpc/kernel/livepatch_64.c | 1 + arch/x86/kernel/livepatch.c | 1 + 5 files changed, 5 insertions(+)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index d5d94593012a..9afef2f32edc 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -133,6 +133,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck,
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 3805e4054d4f..e23e6597fc1e 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -122,6 +122,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck,
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 0018e9c420af..b6bba306d525 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -123,6 +123,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck,
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index c4bce597b69b..2e9a89a0edfc 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -131,6 +131,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck,
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 6c5ad1a82ab0..4c8c1f0802ea 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -120,6 +120,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, * complete. */ if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { ret = add_func(func_list, &pcheck,
-- Gitee From 24b33a8ce81f4228e3e35b9b0fc8cf73b08df3a5 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:35 +0800 Subject: [PATCH 30/37] livepatch: Check calltrace of idle tasks hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- for_each_process_thread() only includes init_task and its descendants, but not idle tasks. Idle tasks also require calltrace checking.
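Conceptually, each do_check_calltrace() below gains a loop like this after the process-thread walk (a sketch; the x86 variant passes fn and data instead of args and fn, as the diffs show):

	for_each_online_cpu(cpu) {
		ret = check_task_calltrace(idle_task(cpu), args, fn);
		if (ret)
			return ret;
	}

so the idle task of every online CPU is checked in addition to all process threads.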
Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 6 ++++++ arch/arm64/kernel/livepatch.c | 6 ++++++ arch/powerpc/kernel/livepatch_32.c | 6 ++++++ arch/powerpc/kernel/livepatch_64.c | 6 ++++++ arch/x86/kernel/livepatch.c | 7 ++++++- 5 files changed, 30 insertions(+), 1 deletion(-) diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 9afef2f32edc..38958d378a81 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -228,6 +228,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu; for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -236,6 +237,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; } diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index e23e6597fc1e..d7ce303a1f6c 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -224,6 +224,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu; for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -232,6 +233,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; } diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index b6bba306d525..a6f0d6cdea79 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -255,6 +255,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu; for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -263,6 +264,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; } diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 2e9a89a0edfc..b3f0cf505356 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -276,6 +276,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args, { int ret; struct task_struct *g, *t; + unsigned int cpu; for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -284,6 +285,11 @@ static int do_check_calltrace(struct walk_stackframe_args *args, if (ret) return ret; } + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), args, fn); + if (ret) + return ret; + } return 0; } diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 4c8c1f0802ea..c8da05117f6e 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -282,6 +282,7 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da { int ret = 0; struct task_struct *g, *t; + unsigned int cpu; for_each_process_thread(g, t) { if (klp_is_migration_thread(t->comm)) @@ -291,7 +292,11 @@ static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *da if (ret) return ret; } - + for_each_online_cpu(cpu) { + ret = check_task_calltrace(idle_task(cpu), fn, data); + if (ret) + return ret; + } return 0; } -- Gitee From 
cecb608bef21e0727252c4f42c41031d5e2e47fd Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:36 +0800 Subject: [PATCH 31/37] livepatch: Organize active functions with struct 'list_head' hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Struct 'klp_func_list' organizes active functions with a singly linked list, which is unnecessarily complicated. Rename it to 'actv_func' and organize it with 'list_head' to make the code simpler. Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 12 ++--- arch/arm64/kernel/livepatch.c | 12 ++--- arch/powerpc/kernel/livepatch_32.c | 10 ++-- arch/powerpc/kernel/livepatch_64.c | 12 ++--- arch/x86/kernel/livepatch.c | 10 ++-- include/linux/livepatch.h | 10 +--- kernel/livepatch/core.c | 79 +++++++++++++++--------------- 7 files changed, 62 insertions(+), 83 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 38958d378a81..2afac130c742 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -84,14 +84,13 @@ static bool check_jump_insn(unsigned long func_addr) } int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; struct klp_func_node *func_node; struct klp_func *func; unsigned long func_addr, func_size; - struct klp_func_list *pcheck = NULL; for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -136,8 +135,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -171,16 +169,14 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; #endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret;
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index d7ce303a1f6c..6f5e652c24ec 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -77,14 +77,13 @@ static bool check_jump_insn(unsigned long func_addr) } int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node; - struct klp_func_list *pcheck = NULL; for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -125,8 +124,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, +
ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -161,8 +159,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -170,8 +167,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index a6f0d6cdea79..e68602c8d097 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -74,14 +74,13 @@ static bool check_jump_insn(unsigned long func_addr) } int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node; - struct klp_func_list *pcheck = NULL; for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -126,8 +125,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -161,14 +159,14 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; #endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index b3f0cf505356..74274f42730a 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -79,14 +79,13 @@ static bool check_jump_insn(unsigned long func_addr) } int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pcheck = NULL; for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -134,8 +133,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -148,7 +146,7 @@ int arch_klp_check_activeness_func(struct 
klp_patch *patch, int enable, func_addr = ppc_function_entry( (void *)func->new_func); func_size = func->new_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -174,7 +172,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, */ func_addr = (unsigned long)func->old_func; func_size = func->old_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, "OLD_FUNC", 0); if (ret) return ret; @@ -186,7 +184,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)&func_node->arch_data.trampoline; func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, "trampoline", 0); if (ret) return ret; diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index c8da05117f6e..47135e14bb4e 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -73,14 +73,13 @@ static bool check_jump_insn(unsigned long func_addr) } int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct klp_func_list **func_list) + klp_add_func_t add_func, struct list_head *func_list) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pcheck = NULL; for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -123,8 +122,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || (func->force == KLP_NORMAL_FORCE) || check_jump_insn(func_addr)) { - ret = add_func(func_list, &pcheck, - func_addr, func_size, + ret = add_func(func_list, func_addr, func_size, func->old_name, func->force); if (ret) return ret; @@ -156,7 +154,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -164,7 +162,7 @@ int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func(func_list, &pcheck, func_addr, + ret = add_func(func_list, func_addr, func_size, func->old_name, 0); if (ret) return ret; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 7146989b5fbc..427485f73793 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -259,15 +259,7 @@ int klp_compare_address(unsigned long pc, unsigned long func_addr, void arch_klp_init(void); int klp_module_delete_safety_check(struct module *mod); -struct klp_func_list { - struct klp_func_list *next; - unsigned long func_addr; - unsigned long func_size; - const char *func_name; - int force; -}; - -typedef int (*klp_add_func_t)(struct klp_func_list **funcs, struct klp_func_list **func, +typedef int (*klp_add_func_t)(struct list_head *func_list, unsigned long func_addr, unsigned long func_size, const char *func_name, int force); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index ebeaf50559ea..d036a2593ce6 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1332,7 +1332,7 @@ int __weak arch_klp_check_calltrace(bool 
(*fn)(void *, int *, unsigned long), vo int __weak arch_klp_check_activeness_func(struct klp_patch *patch, int enable, klp_add_func_t add_func, - struct klp_func_list **func_list) + struct list_head *func_list) { return -EINVAL; } @@ -1347,58 +1347,59 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } +struct actv_func { + struct list_head list; + unsigned long func_addr; + unsigned long func_size; + const char *func_name; + int force; +}; + static bool check_func_list(void *data, int *ret, unsigned long pc) { - struct klp_func_list *funcs = (struct klp_func_list *)data; + struct list_head *func_list = (struct list_head *)data; + struct actv_func *func = NULL; - while (funcs != NULL) { - *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, - klp_size_to_check(funcs->func_size, funcs->force)); + list_for_each_entry(func, func_list, list) { + *ret = klp_compare_address(pc, func->func_addr, func->func_name, + klp_size_to_check(func->func_size, func->force)); if (*ret) return false; - funcs = funcs->next; } return true; } -static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func, - unsigned long func_addr, unsigned long func_size, const char *func_name, - int force) +static int add_func_to_list(struct list_head *func_list, unsigned long func_addr, + unsigned long func_size, const char *func_name, + int force) { - if (*func == NULL) { - *funcs = kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*funcs)) - return -ENOMEM; - *func = *funcs; - } else { - (*func)->next = kzalloc(sizeof(**funcs), GFP_ATOMIC); - if (!(*func)->next) - return -ENOMEM; - *func = (*func)->next; - } - (*func)->func_addr = func_addr; - (*func)->func_size = func_size; - (*func)->func_name = func_name; - (*func)->force = force; - (*func)->next = NULL; + struct actv_func *func = kzalloc(sizeof(struct actv_func), GFP_ATOMIC); + + if (!func) + return -ENOMEM; + func->func_addr = func_addr; + func->func_size = func_size; + func->func_name = func_name; + func->force = force; + list_add_tail(&func->list, func_list); return 0; } -static void free_func_list(struct klp_func_list **funcs) +static void free_func_list(struct list_head *func_list) { - struct klp_func_list *p; + struct actv_func *func = NULL; + struct actv_func *tmp = NULL; - while (*funcs != NULL) { - p = *funcs; - *funcs = (*funcs)->next; - kfree(p); + list_for_each_entry_safe(func, tmp, func_list, list) { + list_del(&func->list); + kfree(func); } } static int klp_check_calltrace(struct klp_patch *patch, int enable) { int ret = 0; - struct klp_func_list *func_list = NULL; + LIST_HEAD(func_list); ret = arch_klp_check_activeness_func(patch, enable, add_func_to_list, &func_list); if (ret) { @@ -1406,10 +1407,10 @@ static int klp_check_calltrace(struct klp_patch *patch, int enable) goto out; } - if (!func_list) + if (list_empty(&func_list)) goto out; - ret = arch_klp_check_calltrace(check_func_list, (void *)func_list); + ret = arch_klp_check_calltrace(check_func_list, (void *)&func_list); out: free_func_list(&func_list); @@ -2054,7 +2055,7 @@ static void set_tasks_patch_state(int patch_state) put_online_cpus(); } -static void update_patch_state(struct task_struct *task, struct klp_func_list *func_list) +static void update_patch_state(struct task_struct *task, struct list_head *func_list) { struct rq *rq; struct rq_flags flags; @@ -2082,7 +2083,7 @@ static void check_task_calltrace_ipi(void *func_list) current->patch_state = KLP_PATCHED; } -static void update_patch_state_ipi(struct klp_func_list 
*func_list) +static void update_patch_state_ipi(struct list_head *func_list) { unsigned int cpu; unsigned int curr_cpu; @@ -2098,7 +2099,7 @@ static void update_patch_state_ipi(struct klp_func_list *func_list) } #endif -static void update_tasks_patch_state(struct klp_func_list *func_list) +static void update_tasks_patch_state(struct list_head *func_list) { unsigned int cpu; struct task_struct *g, *task; @@ -2155,7 +2156,7 @@ static bool is_patchable(void) static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) { - struct klp_func_list *func_list = NULL; + LIST_HEAD(func_list); int ret = -EINVAL; int i; int retry_cnt = 0; @@ -2171,7 +2172,7 @@ static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) for (i = 0; i < KLP_RETRY_COUNT; i++) { retry_cnt++; - update_tasks_patch_state(func_list); + update_tasks_patch_state(&func_list); if (is_patchable()) { arch_klp_code_modify_prepare(); ret = enable_patch(patch, true);
-- Gitee From 8a6008901db34269dc644b529eca9e89ca0e3c7e Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:37 +0800 Subject: [PATCH 32/37] livepatch: Fix huge_depth in arch_klp_check_activeness_func() hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- Fix a cmetrics warning like the one below: cmetrics-the depth of the method {arch_klp_check_activeness_func()} is 6, it is over 4 At the same time, the implementations of arch_klp_check_activeness_func() in x86/arm/arm64/ppc32 are almost the same, so move the common code out of arch to reduce duplication. Signed-off-by: Zheng Yejian --- arch/arm/kernel/livepatch.c | 105 +--------------- arch/arm64/kernel/livepatch.c | 103 +-------------- arch/powerpc/kernel/livepatch_32.c | 105 +--------------- arch/powerpc/kernel/livepatch_64.c | 193 ++++++++++++++--------------- arch/x86/kernel/livepatch.c | 102 +-------------- kernel/livepatch/core.c | 114 ++++++++++++++++- 6 files changed, 205 insertions(+), 517 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 38958d378a81..3379fbf16dd4 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -69,7 +69,7 @@ struct walk_stackframe_args { bool (*check_func)(void *data, int *ret, unsigned long pc); }; -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; @@ -83,109 +83,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func_node *func_node; - struct klp_func *func; - unsigned long func_addr, func_size; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions.
- */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of intructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; -#endif - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static int klp_check_jump_func(struct stackframe *frame, void *ws_args) { struct walk_stackframe_args *args = ws_args; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 6f5e652c24ec..6675569c8a4b 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -62,7 +62,7 @@ struct walk_stackframe_args { bool (*check_func)(void *data, int *ret, unsigned long pc); }; -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; @@ -76,107 +76,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr, func_size; - struct klp_func_node *func_node; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. 
- */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [the active one] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) { - return -EINVAL; - } -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; -#endif - - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, func_size, - func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static bool klp_check_jump_func(void *ws_args, unsigned long pc) { struct walk_stackframe_args *args = ws_args; diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index e68602c8d097..e45cd3deae08 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -59,7 +59,7 @@ struct walk_stackframe_args { bool (*check_func)(void *data, int *ret, unsigned long pc); }; -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { unsigned long i; u32 *insn = (u32*)func_addr; @@ -73,109 +73,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr, func_size; - struct klp_func_node *func_node; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. 
- */ - func_node = klp_find_func_node(func->old_func); - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemtion is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the previously - * patched function and the function itself - * which to be unpatched. - */ - func_node = klp_find_func_node(func->old_func); - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; -#endif - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - void notrace klp_walk_stackframe(struct stackframe *frame, int (*fn)(struct stackframe *, void *), struct task_struct *tsk, void *data) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 74274f42730a..413ecb5f206a 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -78,120 +78,109 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, +int arch_klp_check_activeness_func(struct klp_func *func, int enable, klp_add_func_t add_func, struct list_head *func_list) { int ret; - struct klp_object *obj; - struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - func_node = klp_find_func_node(func->old_func); - - /* Check func address in stack */ - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. 
- */ - if (!func_node || - list_empty(&func_node->func_stack)) { - /* - * No patched on this function - * [ the origin one ] - */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [ the active one ] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = ppc_function_entry( - (void *)prev->new_func); - func_size = prev->new_size; - } - /* - * When preemption is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be repalced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the function itself - * which to be unpatched. - */ - func_addr = ppc_function_entry( - (void *)func->new_func); - func_size = func->new_size; - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - -#ifdef PPC64_ELF_ABI_v1 + func_node = klp_find_func_node(func->old_func); + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently + * active functions. + */ + if (!func_node || list_empty(&func_node->func_stack)) { /* - * Check trampoline in stack - * new_func callchain: - * old_func - * -=> trampoline - * -=> new_func - * so, we should check all the func in the callchain + * No patched on this function + * [ the origin one ] */ - if (func_addr != (unsigned long)func->old_func) { + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function + * [ the active one ] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = ppc_function_entry((void *)prev->new_func); + func_size = prev->new_size; + } + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be repalced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + } + } else { + /* + * When disable, check for the function itself + * which to be unpatched. + */ + func_addr = ppc_function_entry((void *)func->new_func); + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + +#ifdef PPC64_ELF_ABI_v1 + /* + * Check trampoline in stack + * new_func callchain: + * old_func + * -=> trampoline + * -=> new_func + * so, we should check all the func in the callchain + */ + if (func_addr != (unsigned long)func->old_func) { #ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. 
- */ - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - ret = add_func(func_list, func_addr, - func_size, "OLD_FUNC", 0); - if (ret) - return ret; + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + ret = add_func(func_list, func_addr, + func_size, "OLD_FUNC", 0); + if (ret) + return ret; #endif - if (func_node == NULL || - func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) - continue; - - func_addr = (unsigned long)&func_node->arch_data.trampoline; - func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func(func_list, func_addr, - func_size, "trampoline", 0); - if (ret) - return ret; - } -#endif - } + if (func_node == NULL || + func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) + return 0; + + func_addr = (unsigned long)&func_node->arch_data.trampoline; + func_size = sizeof(struct ppc64_klp_btramp_entry); + ret = add_func(func_list, func_addr, + func_size, "trampoline", 0); + if (ret) + return ret; } +#endif return 0; } diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 47135e14bb4e..99b72629637d 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -52,7 +52,7 @@ static bool is_jump_insn(u8 *insn) return false; } -static bool check_jump_insn(unsigned long func_addr) +bool arch_check_jump_insn(unsigned long func_addr) { int len = JMP_E9_INSN_SIZE; struct insn insn; @@ -72,106 +72,6 @@ static bool check_jump_insn(unsigned long func_addr) return false; } -int arch_klp_check_activeness_func(struct klp_patch *patch, int enable, - klp_add_func_t add_func, struct list_head *func_list) -{ - int ret; - struct klp_object *obj; - struct klp_func *func; - unsigned long func_addr, func_size; - struct klp_func_node *func_node = NULL; - - for (obj = patch->objs; obj->funcs; obj++) { - for (func = obj->funcs; func->old_name; func++) { - func_node = klp_find_func_node(func->old_func); - - /* Check func address in stack */ - if (enable) { - if (func->patched || func->force == KLP_ENFORCEMENT) - continue; - /* - * When enable, checking the currently - * active functions. - */ - if (!func_node || - list_empty(&func_node->func_stack)) { - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - /* - * Previously patched function - * [the active one] - */ - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - /* - * When preemtion is disabled and the - * replacement area does not contain a jump - * instruction, the migration thread is - * scheduled to run stop machine only after the - * excution of instructions to be replaced is - * complete. - */ - if (IS_ENABLED(CONFIG_PREEMPTION) || - IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || - (func->force == KLP_NORMAL_FORCE) || - check_jump_insn(func_addr)) { - ret = add_func(func_list, func_addr, func_size, - func->old_name, func->force); - if (ret) - return ret; - } - } else { - /* - * When disable, check for the function - * itself which to be unpatched. - */ - if (!func_node) - return -EINVAL; -#ifdef CONFIG_PREEMPTION - /* - * No scheduling point in the replacement - * instructions. 
Therefore, when preemption is - * not enabled, atomic execution is performed - * and these instructions will not appear on - * the stack. - */ - if (list_is_singular(&func_node->func_stack)) { - func_addr = (unsigned long)func->old_func; - func_size = func->old_size; - } else { - struct klp_func *prev; - - prev = list_first_or_null_rcu( - &func_node->func_stack, - struct klp_func, stack_node); - func_addr = (unsigned long)prev->new_func; - func_size = prev->new_size; - } - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; -#endif - - func_addr = (unsigned long)func->new_func; - func_size = func->new_size; - ret = add_func(func_list, func_addr, - func_size, func->old_name, 0); - if (ret) - return ret; - } - } - } - return 0; -} - static void klp_print_stack_trace(void *trace_ptr, int trace_len) { int i; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index d036a2593ce6..659f5eb73034 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1330,11 +1330,98 @@ int __weak arch_klp_check_calltrace(bool (*fn)(void *, int *, unsigned long), vo return -EINVAL; } -int __weak arch_klp_check_activeness_func(struct klp_patch *patch, int enable, +bool __weak arch_check_jump_insn(unsigned long func_addr) +{ + return true; +} + +int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, klp_add_func_t add_func, struct list_head *func_list) { - return -EINVAL; + int ret; + unsigned long func_addr, func_size; + struct klp_func_node *func_node = NULL; + + func_node = klp_find_func_node(func->old_func); + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently active functions. + */ + if (!func_node || + list_empty(&func_node->func_stack)) { + /* + * Not patched on this function [the origin one] + */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function [the active one] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + /* + * When preemption is disabled and the replacement area + * does not contain a jump instruction, the migration + * thread is scheduled to run stop machine only after the + * execution of instructions to be replaced is complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + arch_check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + } + } else { + /* + * When disable, check for the function itself which to be unpatched. + */ + if (!func_node) + return -EINVAL; +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement instructions. Therefore, + * when preemption is not enabled, atomic execution is performed + * and these instructions will not appear on the stack. 
+ */ + if (list_is_singular(&func_node->func_stack)) { + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + struct klp_func *prev; + + prev = list_first_or_null_rcu( + &func_node->func_stack, + struct klp_func, stack_node); + func_addr = (unsigned long)prev->new_func; + func_size = prev->new_size; + } + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; +#endif + + func_addr = (unsigned long)func->new_func; + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + return 0; } static inline unsigned long klp_size_to_check(unsigned long func_size, @@ -1396,12 +1483,31 @@ static void free_func_list(struct list_head *func_list) } } +static int klp_check_activeness_func(struct klp_patch *patch, int enable, + struct list_head *func_list) +{ + int ret; + struct klp_object *obj = NULL; + struct klp_func *func = NULL; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = arch_klp_check_activeness_func(func, enable, + add_func_to_list, + func_list); + if (ret) + return ret; + } + } + return 0; +} + static int klp_check_calltrace(struct klp_patch *patch, int enable) { int ret = 0; LIST_HEAD(func_list); - ret = arch_klp_check_activeness_func(patch, enable, add_func_to_list, &func_list); + ret = klp_check_activeness_func(patch, enable, &func_list); if (ret) { pr_err("collect active functions failed, ret=%d\n", ret); goto out; @@ -2161,7 +2267,7 @@ static int klp_breakpoint_enable_patch(struct klp_patch *patch, int *cnt) int i; int retry_cnt = 0; - ret = arch_klp_check_activeness_func(patch, true, add_func_to_list, &func_list); + ret = klp_check_activeness_func(patch, true, &func_list); if (ret) { pr_err("break optimize collecting active functions failed, ret=%d\n", ret); goto out;
-- Gitee From 0bb0d43f3b408689ebc1b8a9b5bcc0db925f3e2e Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 16 Nov 2022 14:47:38 +0800 Subject: [PATCH 33/37] livepatch: Use func->func_node directly hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ CVE: NA -------------------------------- By the time we run into arch_klp_check_activeness_func(), the func_node corresponding to func->old_func has already been stored in func->func_node and must be valid, so there is no need to look up func_node again or re-validate it. __klp_enable_patch() klp_mem_prepare() func_node_alloc // 1. Alloc func->func_node for func->old_func klp_try_enable_patch() klp_check_calltrace() arch_klp_check_activeness_func() // 2. Access func_node found by func->old_func klp_breakpoint_optimize() klp_breakpoint_enable_patch() ... arch_klp_check_activeness_func() // 3.
Access func_node found by func->old_func Signed-off-by: Zheng Yejian --- arch/powerpc/kernel/livepatch_64.c | 7 +++---- include/linux/livepatch.h | 1 - kernel/livepatch/core.c | 12 +++--------- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 413ecb5f206a..6455be098a23 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -85,7 +85,7 @@ int arch_klp_check_activeness_func(struct klp_func *func, int enable, unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - func_node = klp_find_func_node(func->old_func); + func_node = func->func_node; /* Check func address in stack */ if (enable) { if (func->patched || func->force == KLP_ENFORCEMENT) @@ -94,7 +94,7 @@ int arch_klp_check_activeness_func(struct klp_func *func, int enable, * When enable, checking the currently * active functions. */ - if (!func_node || list_empty(&func_node->func_stack)) { + if (list_empty(&func_node->func_stack)) { /* * No patched on this function * [ the origin one ] @@ -169,8 +169,7 @@ int arch_klp_check_activeness_func(struct klp_func *func, int enable, return ret; #endif - if (func_node == NULL || - func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) + if (func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) return 0; func_addr = (unsigned long)&func_node->arch_data.trampoline; diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 427485f73793..38d707b9b4e1 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -239,7 +239,6 @@ struct klp_func_node { void *brk_func; }; -struct klp_func_node *klp_find_func_node(const void *old_func); void klp_add_func_node(struct klp_func_node *func_node); void klp_del_func_node(struct klp_func_node *func_node); void *klp_get_brk_func(void *addr); diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 659f5eb73034..f11266674efe 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1343,7 +1343,7 @@ int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - func_node = klp_find_func_node(func->old_func); + func_node = func->func_node; /* Check func address in stack */ if (enable) { if (func->patched || func->force == KLP_ENFORCEMENT) @@ -1351,8 +1351,7 @@ int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, /* * When enable, checking the currently active functions. */ - if (!func_node || - list_empty(&func_node->func_stack)) { + if (list_empty(&func_node->func_stack)) { /* * Not patched on this function [the origin one] */ @@ -1385,11 +1384,6 @@ int __weak arch_klp_check_activeness_func(struct klp_func *func, int enable, return ret; } } else { - /* - * When disable, check for the function itself which to be unpatched. - */ - if (!func_node) - return -EINVAL; #ifdef CONFIG_PREEMPTION /* * No scheduling point in the replacement instructions. Therefore, @@ -1529,7 +1523,7 @@ static LIST_HEAD(klp_func_list); * The caller must ensure that the klp_mutex lock is held or is in the rcu read * critical area. 
From f04ab5b7e210955c33842f52640d30e4a1703616 Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:39 +0800
Subject: [PATCH 34/37] livepatch/core: Make several functions static

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ
CVE: NA

--------------------------------

klp_add_func_node() and klp_del_func_node() are only used within
kernel/livepatch/core.c, so they should be static.

Signed-off-by: Zheng Yejian
---
 include/linux/livepatch.h | 2 --
 kernel/livepatch/core.c   | 4 ++--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 38d707b9b4e1..b11d4afed635 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -239,8 +239,6 @@ struct klp_func_node {
     void *brk_func;
 };
 
-void klp_add_func_node(struct klp_func_node *func_node);
-void klp_del_func_node(struct klp_func_node *func_node);
 void *klp_get_brk_func(void *addr);
 
 static inline
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index f11266674efe..511f5a71e994 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1536,12 +1536,12 @@ static struct klp_func_node *klp_find_func_node(const void *old_func)
     return NULL;
 }
 
-void klp_add_func_node(struct klp_func_node *func_node)
+static void klp_add_func_node(struct klp_func_node *func_node)
 {
     list_add_rcu(&func_node->node, &klp_func_list);
 }
 
-void klp_del_func_node(struct klp_func_node *func_node)
+static void klp_del_func_node(struct klp_func_node *func_node)
 {
     list_del_rcu(&func_node->node);
 }
-- 
Gitee

From 73d0c719948f5467653ba5fb2f33c42b8034af4b Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:40 +0800
Subject: [PATCH 35/37] livepatch: Fix warning
 C_RULE_ID_SINGLE_BRANCH_IF_AND_LOOP_BRACKET

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ
CVE: NA

--------------------------------

Remove the unnecessary braces around single-statement if branches.
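The rule matches kernel coding style: a branch consisting of a single
statement takes no braces. As a standalone sketch, with scan_for_jump being
an invented stand-in for the arch_check_jump_insn() variants changed below:

  #include <stdbool.h>

  /* Scan a fixed range of instruction words and stop at the first jump.
   * The single-statement branch carries no braces. */
  bool scan_for_jump(const unsigned int *insn, unsigned long range,
                     bool (*is_jump_insn)(unsigned int))
  {
      unsigned long i;

      for (i = 0; i < range; i++) {
          if (is_jump_insn(*insn))
              return true;
          insn++;
      }
      return false;
  }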
Signed-off-by: Zheng Yejian
---
 arch/arm/kernel/livepatch.c        | 3 +--
 arch/arm64/kernel/livepatch.c      | 3 +--
 arch/powerpc/kernel/livepatch_32.c | 3 +--
 arch/powerpc/kernel/livepatch_64.c | 3 +--
 4 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index 3379fbf16dd4..a5f4c770990f 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -75,9 +75,8 @@ bool arch_check_jump_insn(unsigned long func_addr)
     u32 *insn = (u32*)func_addr;
 
     for (i = 0; i < CHECK_JUMP_RANGE; i++) {
-        if (is_jump_insn(*insn)) {
+        if (is_jump_insn(*insn))
             return true;
-        }
         insn++;
     }
     return false;
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 6675569c8a4b..258f1dcda945 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -68,9 +68,8 @@ bool arch_check_jump_insn(unsigned long func_addr)
     u32 *insn = (u32*)func_addr;
 
     for (i = 0; i < CHECK_JUMP_RANGE; i++) {
-        if (is_jump_insn(*insn)) {
+        if (is_jump_insn(*insn))
             return true;
-        }
         insn++;
     }
     return false;
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index e45cd3deae08..02c28c40024f 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -65,9 +65,8 @@ bool arch_check_jump_insn(unsigned long func_addr)
     u32 *insn = (u32*)func_addr;
 
     for (i = 0; i < CHECK_JUMP_RANGE; i++) {
-        if (is_jump_insn(*insn)) {
+        if (is_jump_insn(*insn))
             return true;
-        }
         insn++;
     }
     return false;
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index 6455be098a23..43f8999aaf4b 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -70,9 +70,8 @@ static bool check_jump_insn(unsigned long func_addr)
     u32 *insn = (u32*)func_addr;
 
     for (i = 0; i < CHECK_JUMP_RANGE; i++) {
-        if (is_jump_insn(*insn)) {
+        if (is_jump_insn(*insn))
             return true;
-        }
         insn++;
     }
     return false;
-- 
Gitee

From c3f8508d22483a9989ae3b5a2f1358d26b9f97b5 Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:41 +0800
Subject: [PATCH 36/37] livepatch: Reduce duplicate definition of 'struct
 walk_stackframe_args'

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ
CVE: NA

--------------------------------

The definition of 'struct walk_stackframe_args' is identical in
arm/arm64/powerpc32/powerpc64, so move it into include/linux/livepatch.h
and keep a single copy.
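As background, here is a small self-contained model of how such an args
structure is threaded through a stack walker. Everything except
struct walk_stackframe_args itself (which matches the definition added
below) is invented for the illustration:

  #include <stdbool.h>
  #include <stdio.h>

  struct walk_stackframe_args {
      void *data;
      int ret;
      bool (*check_func)(void *data, int *ret, unsigned long pc);
  };

  /* Invented callback: fail if any pc falls inside a forbidden range. */
  static bool check_pc(void *data, int *ret, unsigned long pc)
  {
      const unsigned long *range = data; /* [start, end) */

      if (pc >= range[0] && pc < range[1]) {
          *ret = -16;   /* e.g. -EBUSY */
          return false; /* stop the walk */
      }
      return true;
  }

  int main(void)
  {
      unsigned long frames[] = { 0x1000, 0x2040, 0x3000 }; /* fake pcs */
      unsigned long range[] = { 0x2000, 0x3000 };
      struct walk_stackframe_args args = {
          .data = range, .ret = 0, .check_func = check_pc,
      };
      unsigned int i;

      for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
          if (!args.check_func(args.data, &args.ret, frames[i]))
              break;
      printf("ret=%d\n", args.ret); /* prints ret=-16 */
      return 0;
  }

Keeping one shared definition means every architecture's walker and callback
agree on this calling convention by construction.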
Signed-off-by: Zheng Yejian
---
 arch/arm/kernel/livepatch.c        | 6 ------
 arch/arm64/kernel/livepatch.c      | 6 ------
 arch/powerpc/kernel/livepatch_32.c | 6 ------
 arch/powerpc/kernel/livepatch_64.c | 6 ------
 include/linux/livepatch.h          | 6 ++++++
 5 files changed, 6 insertions(+), 24 deletions(-)

diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c
index a5f4c770990f..f37cc04b4cae 100644
--- a/arch/arm/kernel/livepatch.c
+++ b/arch/arm/kernel/livepatch.c
@@ -63,12 +63,6 @@ static bool is_jump_insn(u32 insn)
     return false;
 }
 
-struct walk_stackframe_args {
-    void *data;
-    int ret;
-    bool (*check_func)(void *data, int *ret, unsigned long pc);
-};
-
 bool arch_check_jump_insn(unsigned long func_addr)
 {
     unsigned long i;
diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c
index 258f1dcda945..363fb8e41c49 100644
--- a/arch/arm64/kernel/livepatch.c
+++ b/arch/arm64/kernel/livepatch.c
@@ -56,12 +56,6 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr,
     ((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \
     ((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))
 
-struct walk_stackframe_args {
-    void *data;
-    int ret;
-    bool (*check_func)(void *data, int *ret, unsigned long pc);
-};
-
 bool arch_check_jump_insn(unsigned long func_addr)
 {
     unsigned long i;
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index 02c28c40024f..fdebfa995eca 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -53,12 +53,6 @@ static bool is_jump_insn(u32 insn)
     return false;
 }
 
-struct walk_stackframe_args {
-    void *data;
-    int ret;
-    bool (*check_func)(void *data, int *ret, unsigned long pc);
-};
-
 bool arch_check_jump_insn(unsigned long func_addr)
 {
     unsigned long i;
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index 43f8999aaf4b..315c1db23c7e 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -58,12 +58,6 @@ static bool is_jump_insn(u32 insn)
     return false;
 }
 
-struct walk_stackframe_args {
-    void *data;
-    int ret;
-    bool (*check_func)(void *data, int *ret, unsigned long pc);
-};
-
 static bool check_jump_insn(unsigned long func_addr)
 {
     unsigned long i;
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index b11d4afed635..a5ef153bae8d 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -260,6 +260,12 @@ typedef int (*klp_add_func_t)(struct list_head *func_list,
                               unsigned long func_addr, unsigned long func_size,
                               const char *func_name, int force);
 
+struct walk_stackframe_args {
+    void *data;
+    int ret;
+    bool (*check_func)(void *data, int *ret, unsigned long pc);
+};
+
 #endif
 
 int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
-- 
Gitee

From 005a0c086abe1091403fa9af698b20d178e3b035 Mon Sep 17 00:00:00 2001
From: Zheng Yejian
Date: Wed, 16 Nov 2022 14:47:42 +0800
Subject: [PATCH 37/37] livepatch: Fix compile error when
 CONFIG_LIVEPATCH_WO_FTRACE disabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I60NAZ
CVE: NA

--------------------------------

When CONFIG_LIVEPATCH_WO_FTRACE is disabled, the compiler reports the
following error:

kernel/livepatch/core.c: In function ‘check_address_conflict’:
kernel/livepatch/core.c:1214:18: error: ‘KLP_MAX_REPLACE_SIZE’ undeclared (first use in this function)
 1214 |  end = start + KLP_MAX_REPLACE_SIZE - 1;
      |                ^~~~~~~~~~~~~~~~~~~~
kernel/livepatch/core.c:1214:18: note: each undeclared identifier is reported only once for each function it appears in At top level: kernel/livepatch/core.c:1195:12: warning: ‘check_address_conflict’ defined but not used [-Wunused-function] 1195 | static int check_address_conflict(struct klp_patch *patch) | ^~~~~~~~~~~~~~~~~~~~~~ Fixes: af56c7290f29 ("[Huawei] livepatch/x86: Avoid conflict with static {call,key}") Fixes: aba04b9defb0 ("[Huawei] livepatch/core: Restrict minimum size of function that can be patched") Signed-off-by: Zheng Yejian --- kernel/livepatch/core.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 511f5a71e994..c6e23e8daf64 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -1041,11 +1041,13 @@ static int klp_init_object_loaded(struct klp_patch *patch, func->old_name); return -ENOENT; } +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY if (func->old_size < KLP_MAX_REPLACE_SIZE) { pr_err("%s size less than limit (%lu < %zu)\n", func->old_name, func->old_size, KLP_MAX_REPLACE_SIZE); return -EINVAL; } +#endif #ifdef PPC64_ELF_ABI_v1 /* @@ -1195,6 +1197,7 @@ extern int klp_static_call_register(struct module *mod); static inline int klp_static_call_register(struct module *mod) { return 0; } #endif +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY static int check_address_conflict(struct klp_patch *patch) { struct klp_object *obj; @@ -1231,6 +1234,7 @@ static int check_address_conflict(struct klp_patch *patch) } return 0; } +#endif static int klp_init_patch(struct klp_patch *patch) { @@ -1278,11 +1282,11 @@ static int klp_init_patch(struct klp_patch *patch) } module_enable_ro(patch->mod, true); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY ret = check_address_conflict(patch); if (ret) return ret; -#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY klp_for_each_object(patch, obj) klp_load_hook(obj); #endif -- Gitee
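The fix in the last patch follows a general pattern: a helper that both
references config-dependent symbols and is called only from config-dependent
code must be compiled out together with its caller, otherwise the build
breaks exactly as in the log above. A standalone model of the pattern, with
FEATURE and all other names invented for illustration (build with and
without -DFEATURE):

  #include <stdio.h>

  #ifdef FEATURE
  #define MAX_REPLACE_SIZE 5 /* exists only when the feature is on */

  /* Defined only when its single caller is compiled in, so there is no
   * 'defined but not used' warning and no undeclared identifier. */
  static int check_conflict(unsigned long start)
  {
      unsigned long end = start + MAX_REPLACE_SIZE - 1;

      return end > 0x1000 ? -1 : 0;
  }
  #endif

  int main(void)
  {
  #ifdef FEATURE
      puts(check_conflict(0x0ffe) ? "conflict" : "ok");
  #else
      puts("feature disabled: guarded code compiled out");
  #endif
      return 0;
  }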