author    Josh Poimboeuf <jpoimboe@redhat.com>    2017-02-13 20:42:37 -0500
committer Jiri Kosina <jkosina@suse.cz>           2017-03-08 03:23:40 -0500
commit    c349cdcaba589fb49cf105093ebc695eb8b9ff08
tree      0ab3165baae80dbfed236b5e2c23afd3d14ba14d /kernel
parent    aa82dc3e00da63751bb9dfab26983037b79fc39d
livepatch: move patching functions into patch.c
Move functions related to the actual patching of functions and objects
into a new patch.c file.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Reviewed-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/livepatch/Makefile |   2
 -rw-r--r--  kernel/livepatch/core.c   | 202
 -rw-r--r--  kernel/livepatch/patch.c  | 213
 -rw-r--r--  kernel/livepatch/patch.h  |  32
 4 files changed, 247 insertions(+), 202 deletions(-)
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index e8780c0901d9..e136dad8ff7e 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_LIVEPATCH) += livepatch.o
 
-livepatch-objs := core.o
+livepatch-objs := core.o patch.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 47ed643a6362..6a137e1f4490 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -24,32 +24,13 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/ftrace.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
 #include <linux/elf.h>
 #include <linux/moduleloader.h>
 #include <asm/cacheflush.h>
-
-/**
- * struct klp_ops - structure for tracking registered ftrace ops structs
- *
- * A single ftrace_ops is shared between all enabled replacement functions
- * (klp_func structs) which have the same old_addr. This allows the switch
- * between function versions to happen instantaneously by updating the klp_ops
- * struct's func_stack list. The winner is the klp_func at the top of the
- * func_stack (front of the list).
- *
- * @node:	node for the global klp_ops list
- * @func_stack:	list head for the stack of klp_func's (active func is on top)
- * @fops:	registered ftrace ops struct
- */
-struct klp_ops {
-	struct list_head node;
-	struct list_head func_stack;
-	struct ftrace_ops fops;
-};
+#include "patch.h"
 
 /*
  * The klp_mutex protects the global lists and state transitions of any
@@ -60,28 +41,12 @@ struct klp_ops {
 static DEFINE_MUTEX(klp_mutex);
 
 static LIST_HEAD(klp_patches);
-static LIST_HEAD(klp_ops);
 
 static struct kobject *klp_root_kobj;
 
 /* TODO: temporary stub */
 void klp_update_patch_state(struct task_struct *task) {}
 
-static struct klp_ops *klp_find_ops(unsigned long old_addr)
-{
-	struct klp_ops *ops;
-	struct klp_func *func;
-
-	list_for_each_entry(ops, &klp_ops, node) {
-		func = list_first_entry(&ops->func_stack, struct klp_func,
-					stack_node);
-		if (func->old_addr == old_addr)
-			return ops;
-	}
-
-	return NULL;
-}
-
 static bool klp_is_module(struct klp_object *obj)
 {
 	return obj->name;
@@ -314,171 +279,6 @@ static int klp_write_object_relocations(struct module *pmod,
 	return ret;
 }
 
-static void notrace klp_ftrace_handler(unsigned long ip,
-				       unsigned long parent_ip,
-				       struct ftrace_ops *fops,
-				       struct pt_regs *regs)
-{
-	struct klp_ops *ops;
-	struct klp_func *func;
-
-	ops = container_of(fops, struct klp_ops, fops);
-
-	rcu_read_lock();
-	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
-				      stack_node);
-	if (WARN_ON_ONCE(!func))
-		goto unlock;
-
-	klp_arch_set_pc(regs, (unsigned long)func->new_func);
-unlock:
-	rcu_read_unlock();
-}
-
-/*
- * Convert a function address into the appropriate ftrace location.
- *
- * Usually this is just the address of the function, but on some architectures
- * it's more complicated so allow them to provide a custom behaviour.
- */
-#ifndef klp_get_ftrace_location
-static unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
-	return faddr;
-}
-#endif
-
-static void klp_unpatch_func(struct klp_func *func)
-{
-	struct klp_ops *ops;
-
-	if (WARN_ON(!func->patched))
-		return;
-	if (WARN_ON(!func->old_addr))
-		return;
-
-	ops = klp_find_ops(func->old_addr);
-	if (WARN_ON(!ops))
-		return;
-
-	if (list_is_singular(&ops->func_stack)) {
-		unsigned long ftrace_loc;
-
-		ftrace_loc = klp_get_ftrace_location(func->old_addr);
-		if (WARN_ON(!ftrace_loc))
-			return;
-
-		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
-
-		list_del_rcu(&func->stack_node);
-		list_del(&ops->node);
-		kfree(ops);
-	} else {
-		list_del_rcu(&func->stack_node);
-	}
-
-	func->patched = false;
-}
-
-static int klp_patch_func(struct klp_func *func)
-{
-	struct klp_ops *ops;
-	int ret;
-
-	if (WARN_ON(!func->old_addr))
-		return -EINVAL;
-
-	if (WARN_ON(func->patched))
-		return -EINVAL;
-
-	ops = klp_find_ops(func->old_addr);
-	if (!ops) {
-		unsigned long ftrace_loc;
-
-		ftrace_loc = klp_get_ftrace_location(func->old_addr);
-		if (!ftrace_loc) {
-			pr_err("failed to find location for function '%s'\n",
-			       func->old_name);
-			return -EINVAL;
-		}
-
-		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
-		if (!ops)
-			return -ENOMEM;
-
-		ops->fops.func = klp_ftrace_handler;
-		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
-				  FTRACE_OPS_FL_DYNAMIC |
-				  FTRACE_OPS_FL_IPMODIFY;
-
-		list_add(&ops->node, &klp_ops);
-
-		INIT_LIST_HEAD(&ops->func_stack);
-		list_add_rcu(&func->stack_node, &ops->func_stack);
-
-		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
-		if (ret) {
-			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
-			       func->old_name, ret);
-			goto err;
-		}
-
-		ret = register_ftrace_function(&ops->fops);
-		if (ret) {
-			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
-			       func->old_name, ret);
-			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
-			goto err;
-		}
-
-
-	} else {
-		list_add_rcu(&func->stack_node, &ops->func_stack);
-	}
-
-	func->patched = true;
-
-	return 0;
-
-err:
-	list_del_rcu(&func->stack_node);
-	list_del(&ops->node);
-	kfree(ops);
-	return ret;
-}
-
-static void klp_unpatch_object(struct klp_object *obj)
-{
-	struct klp_func *func;
-
-	klp_for_each_func(obj, func)
-		if (func->patched)
-			klp_unpatch_func(func);
-
-	obj->patched = false;
-}
-
-static int klp_patch_object(struct klp_object *obj)
-{
-	struct klp_func *func;
-	int ret;
-
-	if (WARN_ON(obj->patched))
-		return -EINVAL;
-
-	klp_for_each_func(obj, func) {
-		ret = klp_patch_func(func);
-		if (ret) {
-			klp_unpatch_object(obj);
-			return ret;
-		}
-	}
-	obj->patched = true;
-
-	return 0;
-}
-
 static int __klp_disable_patch(struct klp_patch *patch)
 {
 	struct klp_object *obj;
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
new file mode 100644
index 000000000000..5efa2620851a
--- /dev/null
+++ b/kernel/livepatch/patch.c
@@ -0,0 +1,213 @@
+/*
+ * patch.c - livepatch patching functions
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/livepatch.h>
+#include <linux/list.h>
+#include <linux/ftrace.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <linux/printk.h>
+#include "patch.h"
+
+static LIST_HEAD(klp_ops);
+
+struct klp_ops *klp_find_ops(unsigned long old_addr)
+{
+	struct klp_ops *ops;
+	struct klp_func *func;
+
+	list_for_each_entry(ops, &klp_ops, node) {
+		func = list_first_entry(&ops->func_stack, struct klp_func,
+					stack_node);
+		if (func->old_addr == old_addr)
+			return ops;
+	}
+
+	return NULL;
+}
+
+static void notrace klp_ftrace_handler(unsigned long ip,
+				       unsigned long parent_ip,
+				       struct ftrace_ops *fops,
+				       struct pt_regs *regs)
+{
+	struct klp_ops *ops;
+	struct klp_func *func;
+
+	ops = container_of(fops, struct klp_ops, fops);
+
+	rcu_read_lock();
+	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
+				      stack_node);
+	if (WARN_ON_ONCE(!func))
+		goto unlock;
+
+	klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+	rcu_read_unlock();
+}
+
+/*
+ * Convert a function address into the appropriate ftrace location.
+ *
+ * Usually this is just the address of the function, but on some architectures
+ * it's more complicated so allow them to provide a custom behaviour.
+ */
+#ifndef klp_get_ftrace_location
+static unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	return faddr;
+}
+#endif
+
+static void klp_unpatch_func(struct klp_func *func)
+{
+	struct klp_ops *ops;
+
+	if (WARN_ON(!func->patched))
+		return;
+	if (WARN_ON(!func->old_addr))
+		return;
+
+	ops = klp_find_ops(func->old_addr);
+	if (WARN_ON(!ops))
+		return;
+
+	if (list_is_singular(&ops->func_stack)) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (WARN_ON(!ftrace_loc))
+			return;
+
+		WARN_ON(unregister_ftrace_function(&ops->fops));
+		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
+
+		list_del_rcu(&func->stack_node);
+		list_del(&ops->node);
+		kfree(ops);
+	} else {
+		list_del_rcu(&func->stack_node);
+	}
+
+	func->patched = false;
+}
+
+static int klp_patch_func(struct klp_func *func)
+{
+	struct klp_ops *ops;
+	int ret;
+
+	if (WARN_ON(!func->old_addr))
+		return -EINVAL;
+
+	if (WARN_ON(func->patched))
+		return -EINVAL;
+
+	ops = klp_find_ops(func->old_addr);
+	if (!ops) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (!ftrace_loc) {
+			pr_err("failed to find location for function '%s'\n",
+			       func->old_name);
+			return -EINVAL;
+		}
+
+		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+		if (!ops)
+			return -ENOMEM;
+
+		ops->fops.func = klp_ftrace_handler;
+		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
+				  FTRACE_OPS_FL_DYNAMIC |
+				  FTRACE_OPS_FL_IPMODIFY;
+
+		list_add(&ops->node, &klp_ops);
+
+		INIT_LIST_HEAD(&ops->func_stack);
+		list_add_rcu(&func->stack_node, &ops->func_stack);
+
+		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
+		if (ret) {
+			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
+			       func->old_name, ret);
+			goto err;
+		}
+
+		ret = register_ftrace_function(&ops->fops);
+		if (ret) {
+			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
+			       func->old_name, ret);
+			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
+			goto err;
+		}
+
+
+	} else {
+		list_add_rcu(&func->stack_node, &ops->func_stack);
+	}
+
+	func->patched = true;
+
+	return 0;
+
+err:
+	list_del_rcu(&func->stack_node);
+	list_del(&ops->node);
+	kfree(ops);
+	return ret;
+}
+
+void klp_unpatch_object(struct klp_object *obj)
+{
+	struct klp_func *func;
+
+	klp_for_each_func(obj, func)
+		if (func->patched)
+			klp_unpatch_func(func);
+
+	obj->patched = false;
+}
+
+int klp_patch_object(struct klp_object *obj)
+{
+	struct klp_func *func;
+	int ret;
+
+	if (WARN_ON(obj->patched))
+		return -EINVAL;
+
+	klp_for_each_func(obj, func) {
+		ret = klp_patch_func(func);
+		if (ret) {
+			klp_unpatch_object(obj);
+			return ret;
+		}
+	}
+	obj->patched = true;
+
+	return 0;
+}
diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h
new file mode 100644
index 000000000000..2d0cce02dade
--- /dev/null
+++ b/kernel/livepatch/patch.h
@@ -0,0 +1,32 @@
+#ifndef _LIVEPATCH_PATCH_H
+#define _LIVEPATCH_PATCH_H
+
+#include <linux/livepatch.h>
+#include <linux/list.h>
+#include <linux/ftrace.h>
+
+/**
+ * struct klp_ops - structure for tracking registered ftrace ops structs
+ *
+ * A single ftrace_ops is shared between all enabled replacement functions
+ * (klp_func structs) which have the same old_addr. This allows the switch
+ * between function versions to happen instantaneously by updating the klp_ops
+ * struct's func_stack list. The winner is the klp_func at the top of the
+ * func_stack (front of the list).
+ *
+ * @node:	node for the global klp_ops list
+ * @func_stack:	list head for the stack of klp_func's (active func is on top)
+ * @fops:	registered ftrace ops struct
+ */
+struct klp_ops {
+	struct list_head node;
+	struct list_head func_stack;
+	struct ftrace_ops fops;
+};
+
+struct klp_ops *klp_find_ops(unsigned long old_addr);
+
+int klp_patch_object(struct klp_object *obj);
+void klp_unpatch_object(struct klp_object *obj);
+
+#endif /* _LIVEPATCH_PATCH_H */
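
Note (not part of the patch): after this split, core.c's enable and disable paths are expected to drive patching only through the interface declared in patch.h. A minimal sketch of such a caller follows; the helper name __klp_enable_objects() is hypothetical, while klp_for_each_object(), klp_is_object_loaded() and the obj->patched flag are existing livepatch-core facilities.

/*
 * Illustrative sketch only, not code from this commit: a core.c-style
 * helper that patches every loaded object of a klp_patch through the
 * patch.h interface and unwinds on failure.  The function name
 * __klp_enable_objects() is made up for illustration.
 */
#include <linux/livepatch.h>

#include "patch.h"

static int __klp_enable_objects(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	klp_for_each_object(patch, obj) {
		/* Only vmlinux or an already-loaded module can be patched. */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_patch_object(obj);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Unwind: unpatch whatever was patched before the failure. */
	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);

	return ret;
}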
