Diffstat (limited to 'kernel/kprobes.c')

 kernel/kprobes.c | 681 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 563 insertions(+), 118 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b7df302a0204..fa034d29cf73 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,8 +42,11 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -93,6 +96,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
 	{"native_get_debugreg",},
 	{"irq_entries_start",},
 	{"common_interrupt",},
+	{"mcount",},	/* mcount can be called from everywhere */
 	{NULL}    /* Terminator */
 };
 
@@ -103,81 +107,74 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
  * stepping on the instruction on a vmalloced/kmalloced/data page
  * is a recipe for disaster
  */
-#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
-
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
-	char slot_used[INSNS_PER_PAGE];
 	int nused;
 	int ngarbage;
+	char slot_used[];
 };
 
+#define KPROBE_INSN_PAGE_SIZE(slots)			\
+	(offsetof(struct kprobe_insn_page, slot_used) +	\
+	 (sizeof(char) * (slots)))
+
+struct kprobe_insn_cache {
+	struct list_head pages;	/* list of kprobe_insn_page */
+	size_t insn_size;	/* size of instruction slot */
+	int nr_garbage;
+};
+
+static int slots_per_page(struct kprobe_insn_cache *c)
+{
+	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
+}
+
 enum kprobe_slot_state {
 	SLOT_CLEAN = 0,
 	SLOT_DIRTY = 1,
 	SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
-static LIST_HEAD(kprobe_insn_pages);
-static int kprobe_garbage_slots;
-static int collect_garbage_slots(void);
-
-static int __kprobes check_safety(void)
-{
-	int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
-	ret = freeze_processes();
-	if (ret == 0) {
-		struct task_struct *p, *q;
-		do_each_thread(p, q) {
-			if (p != current && p->state == TASK_RUNNING &&
-			    p->pid != 0) {
-				printk("Check failed: %s is running\n",p->comm);
-				ret = -1;
-				goto loop_end;
-			}
-		} while_each_thread(p, q);
-	}
-loop_end:
-	thaw_processes();
-#else
-	synchronize_sched();
-#endif
-	return ret;
-}
+static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
+static struct kprobe_insn_cache kprobe_insn_slots = {
+	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+	.nr_garbage = 0,
+};
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
 
 /**
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
 
  retry:
-	list_for_each_entry(kip, &kprobe_insn_pages, list) {
-		if (kip->nused < INSNS_PER_PAGE) {
+	list_for_each_entry(kip, &c->pages, list) {
+		if (kip->nused < slots_per_page(c)) {
 			int i;
-			for (i = 0; i < INSNS_PER_PAGE; i++) {
+			for (i = 0; i < slots_per_page(c); i++) {
 				if (kip->slot_used[i] == SLOT_CLEAN) {
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
-					return kip->insns + (i * MAX_INSN_SIZE);
+					return kip->insns + (i * c->insn_size);
 				}
 			}
-			/* Surprise!  No unused slots.  Fix kip->nused. */
-			kip->nused = INSNS_PER_PAGE;
+			/* kip->nused is broken. Fix it. */
+			kip->nused = slots_per_page(c);
+			WARN_ON(1);
 		}
 	}
 
 	/* If there are any garbage slots, collect it and try again. */
-	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+	if (c->nr_garbage && collect_garbage_slots(c) == 0)
 		goto retry;
-	}
-	/* All out of space.  Need to allocate a new page. Use slot 0. */
-	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+
+	/* All out of space.  Need to allocate a new page. */
+	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
 		return NULL;
 
@@ -192,20 +189,23 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
 		return NULL;
 	}
 	INIT_LIST_HEAD(&kip->list);
-	list_add(&kip->list, &kprobe_insn_pages);
-	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	list_add(&kip->list, &c->pages);
 	return kip->insns;
 }
 
+
 kprobe_opcode_t __kprobes *get_insn_slot(void)
 {
-	kprobe_opcode_t *ret;
+	kprobe_opcode_t *ret = NULL;
+
 	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot();
+	ret = __get_insn_slot(&kprobe_insn_slots);
 	mutex_unlock(&kprobe_insn_mutex);
+
 	return ret;
 }
 
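Annotation: the refactored cache replaces the fixed slot_used[INSNS_PER_PAGE] array with a C99 flexible array member sized per cache, since ordinary slots (MAX_INSN_SIZE) and optprobe slots now need different slot counts per page. A minimal user-space sketch of that sizing pattern; struct and macro names here are simplified stand-ins modeled on the patch, not kernel API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the patch's kprobe_insn_page. */
struct insn_page {
	int nused;
	int ngarbage;
	char slot_used[];		/* one state byte per slot */
};

/* Same offsetof() trick as KPROBE_INSN_PAGE_SIZE() in the patch. */
#define INSN_PAGE_SIZE(slots) \
	(offsetof(struct insn_page, slot_used) + sizeof(char) * (slots))

int main(void)
{
	size_t slots = 64;		/* plays the role of slots_per_page(c) */
	struct insn_page *kip = calloc(1, INSN_PAGE_SIZE(slots));

	if (!kip)
		return 1;
	kip->slot_used[0] = 2;		/* SLOT_USED, as in the patch */
	kip->nused = 1;
	printf("header %zu bytes, page record %zu bytes\n",
	       offsetof(struct insn_page, slot_used), INSN_PAGE_SIZE(slots));
	free(kip);
	return 0;
}

One allocation covers the header plus one byte per slot, which is why the patch can drop the compile-time INSNS_PER_PAGE constant entirely.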
@@ -221,7 +221,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 		 * so as not to have to set it up again the
 		 * next time somebody inserts a probe.
 		 */
-		if (!list_is_singular(&kprobe_insn_pages)) {
+		if (!list_is_singular(&kip->list)) {
 			list_del(&kip->list);
 			module_free(NULL, kip->insns);
 			kfree(kip);
@@ -231,52 +231,84 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 	return 0;
 }
 
-static int __kprobes collect_garbage_slots(void)
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip, *next;
 
-	/* Ensure no-one is preepmted on the garbages */
-	if (check_safety())
-		return -EAGAIN;
+	/* Ensure no-one is interrupted on the garbages */
+	synchronize_sched();
 
-	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
+	list_for_each_entry_safe(kip, next, &c->pages, list) {
 		int i;
 		if (kip->ngarbage == 0)
 			continue;
 		kip->ngarbage = 0;	/* we will collect all garbages */
-		for (i = 0; i < INSNS_PER_PAGE; i++) {
+		for (i = 0; i < slots_per_page(c); i++) {
 			if (kip->slot_used[i] == SLOT_DIRTY &&
 			    collect_one_slot(kip, i))
 				break;
 		}
 	}
-	kprobe_garbage_slots = 0;
+	c->nr_garbage = 0;
 	return 0;
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+				       kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
-	mutex_lock(&kprobe_insn_mutex);
-	list_for_each_entry(kip, &kprobe_insn_pages, list) {
-		if (kip->insns <= slot &&
-		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
-			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+	list_for_each_entry(kip, &c->pages, list) {
+		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+		if (idx >= 0 && idx < slots_per_page(c)) {
+			WARN_ON(kip->slot_used[idx] != SLOT_USED);
 			if (dirty) {
-				kip->slot_used[i] = SLOT_DIRTY;
+				kip->slot_used[idx] = SLOT_DIRTY;
 				kip->ngarbage++;
+				if (++c->nr_garbage > slots_per_page(c))
+					collect_garbage_slots(c);
 			} else
-				collect_one_slot(kip, i);
-			break;
+				collect_one_slot(kip, idx);
+			return;
 		}
 	}
+	/* Could not free this slot. */
+	WARN_ON(1);
+}
 
-	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
-		collect_garbage_slots();
-
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+{
+	mutex_lock(&kprobe_insn_mutex);
+	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
 	mutex_unlock(&kprobe_insn_mutex);
 }
+#ifdef CONFIG_OPTPROBES
+/* For optimized_kprobe buffer */
+static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
+static struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
+	/* .insn_size is initialized later */
+	.nr_garbage = 0,
+};
+/* Get a slot for optimized_kprobe buffer */
+kprobe_opcode_t __kprobes *get_optinsn_slot(void)
+{
+	kprobe_opcode_t *ret = NULL;
+
+	mutex_lock(&kprobe_optinsn_mutex);
+	ret = __get_insn_slot(&kprobe_optinsn_slots);
+	mutex_unlock(&kprobe_optinsn_mutex);
+
+	return ret;
+}
+
+void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
+{
+	mutex_lock(&kprobe_optinsn_mutex);
+	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
+	mutex_unlock(&kprobe_optinsn_mutex);
+}
+#endif
 #endif
 
 /* We have preemption disabled.. so it is safe to use __ versions */
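Annotation: __free_insn_slot() now derives the slot index from pointer arithmetic and range-checks it against slots_per_page(c), instead of comparing against a compile-time page bound. A user-space model of that lookup, with illustrative constants standing in for c->insn_size and slots_per_page(c):

#include <stdio.h>

#define SLOTS_PER_PAGE	64		/* stands in for slots_per_page(c) */
#define INSN_SIZE	16		/* stands in for c->insn_size */

typedef unsigned char opcode_t;

/* Return the slot index of 'slot' within 'page', or -1 if it does
 * not belong to this page (the patch WARNs and keeps searching). */
static long slot_index(const opcode_t *page, const opcode_t *slot)
{
	long idx = (slot - page) / INSN_SIZE;

	return (idx >= 0 && idx < SLOTS_PER_PAGE) ? idx : -1;
}

int main(void)
{
	static opcode_t page[SLOTS_PER_PAGE * INSN_SIZE];

	printf("%ld\n", slot_index(page, page + 5 * INSN_SIZE));	/* 5 */
	printf("%ld\n", slot_index(page, page + sizeof(page)));	/* -1 */
	return 0;
}

Because the same __get_insn_slot()/__free_insn_slot() pair now takes a cache argument, one implementation serves both kprobe_insn_slots and the new kprobe_optinsn_slots.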
@@ -307,23 +339,401 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 		if (p->addr == addr)
 			return p;
 	}
+
 	return NULL;
 }
 
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+/* Return true if the kprobe is an aggregator */
+static inline int kprobe_aggrprobe(struct kprobe *p)
+{
+	return p->pre_handler == aggr_pre_handler;
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+#ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
+/*
+ * Call all pre_handler on the list, but ignores its return value.
+ * This must be called from arch-dep optimized caller.
+ */
+void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry_rcu(kp, &p->list, list) {
+		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
+			set_kprobe_instance(kp);
+			kp->pre_handler(kp, regs);
+		}
+		reset_kprobe_instance();
+	}
+}
+
+/* Return true(!0) if the kprobe is ready for optimization. */
+static inline int kprobe_optready(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if (kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		return arch_prepared_optinsn(&op->optinsn);
+	}
+
+	return 0;
+}
+
+/*
+ * Return an optimized kprobe whose optimizing code replaces
+ * instructions including addr (exclude breakpoint).
+ */
+struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+{
+	int i;
+	struct kprobe *p = NULL;
+	struct optimized_kprobe *op;
+
+	/* Don't check i == 0, since that is a breakpoint case. */
+	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
+		p = get_kprobe((void *)(addr - i));
+
+	if (p && kprobe_optready(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (arch_within_optimized_kprobe(op, addr))
+			return p;
+	}
+
+	return NULL;
+}
+
+/* Optimization staging list, protected by kprobe_mutex */
+static LIST_HEAD(optimizing_list);
+
+static void kprobe_optimizer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+#define OPTIMIZE_DELAY 5
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	/* Lock modules while optimizing kprobes */
+	mutex_lock(&module_mutex);
+	mutex_lock(&kprobe_mutex);
+	if (kprobes_all_disarmed || !kprobes_allow_optimization)
+		goto end;
+
+	/*
+	 * Wait for quiesence period to ensure all running interrupts
+	 * are done. Because optprobe may modify multiple instructions
+	 * there is a chance that Nth instruction is interrupted. In that
+	 * case, running interrupt can return to 2nd-Nth byte of jump
+	 * instruction. This wait is for avoiding it.
+	 */
+	synchronize_sched();
+
+	/*
+	 * The optimization/unoptimization refers online_cpus via
+	 * stop_machine() and cpu-hotplug modifies online_cpus.
+	 * And same time, text_mutex will be held in cpu-hotplug and here.
+	 * This combination can cause a deadlock (cpu-hotplug try to lock
+	 * text_mutex but stop_machine can not be done because online_cpus
+	 * has been changed)
+	 * To avoid this deadlock, we need to call get_online_cpus()
+	 * for preventing cpu-hotplug outside of text_mutex locking.
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
+		WARN_ON(kprobe_disabled(&op->kp));
+		if (arch_optimize_kprobe(op) < 0)
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		list_del_init(&op->list);
+	}
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+end:
+	mutex_unlock(&kprobe_mutex);
+	mutex_unlock(&module_mutex);
+}
+
+/* Optimize kprobe if p is ready to be optimized */
+static __kprobes void optimize_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	/* Check if the kprobe is disabled or not ready for optimization. */
+	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
+	    (kprobe_disabled(p) || kprobes_all_disarmed))
+		return;
+
+	/* Both of break_handler and post_handler are not supported. */
+	if (p->break_handler || p->post_handler)
+		return;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+
+	/* Check there is no other kprobes at the optimized instructions */
+	if (arch_check_optimized_kprobe(op) < 0)
+		return;
+
+	/* Check if it is already optimized. */
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+		return;
+
+	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+	list_add(&op->list, &optimizing_list);
+	if (!delayed_work_pending(&optimizing_work))
+		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+}
+
+/* Unoptimize a kprobe if p is optimized */
+static __kprobes void unoptimize_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (!list_empty(&op->list))
+			/* Dequeue from the optimization queue */
+			list_del_init(&op->list);
+		else
+			/* Replace jump with break */
+			arch_unoptimize_kprobe(op);
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	}
+}
+
+/* Remove optimized instructions */
+static void __kprobes kill_optimized_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	if (!list_empty(&op->list)) {
+		/* Dequeue from the optimization queue */
+		list_del_init(&op->list);
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	}
+	/* Don't unoptimize, because the target code will be freed. */
+	arch_remove_optimized_kprobe(op);
+}
+
+/* Try to prepare optimized instructions */
+static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_prepare_optimized_kprobe(op);
+}
+
+/* Free optimized instructions and optimized_kprobe */
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_remove_optimized_kprobe(op);
+	kfree(op);
+}
+
+/* Allocate new optimized_kprobe and try to prepare optimized instructions */
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
+	if (!op)
+		return NULL;
+
+	INIT_LIST_HEAD(&op->list);
+	op->kp.addr = p->addr;
+	arch_prepare_optimized_kprobe(op);
+
+	return &op->kp;
+}
+
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
+
+/*
+ * Prepare an optimized_kprobe and optimize it
+ * NOTE: p must be a normal registered kprobe
+ */
+static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
+{
+	struct kprobe *ap;
+	struct optimized_kprobe *op;
+
+	ap = alloc_aggr_kprobe(p);
+	if (!ap)
+		return;
+
+	op = container_of(ap, struct optimized_kprobe, kp);
+	if (!arch_prepared_optinsn(&op->optinsn)) {
+		/* If failed to setup optimizing, fallback to kprobe */
+		free_aggr_kprobe(ap);
+		return;
+	}
+
+	init_aggr_kprobe(ap, p);
+	optimize_kprobe(ap);
+}
+
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already allowed, just return */
+	if (kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = true;
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			if (!kprobe_disabled(p))
+				optimize_kprobe(p);
+	}
+	mutex_unlock(&text_mutex);
+	printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already prohibited, just return */
+	if (!kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = false;
+	printk(KERN_INFO "Kprobes globally unoptimized\n");
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist) {
+			if (!kprobe_disabled(p))
+				unoptimize_kprobe(p);
+		}
+	}
+
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	/* Allow all currently running kprobes to complete */
+	synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *length,
+				      loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&kprobe_mutex);
+	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	if (sysctl_kprobes_optimization)
+		optimize_all_kprobes();
+	else
+		unoptimize_all_kprobes();
+	mutex_unlock(&kprobe_mutex);
+
+	return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
+static void __kprobes __arm_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p;
+
+	/* Check collision with other optimized kprobes */
+	old_p = get_optimized_kprobe((unsigned long)p->addr);
+	if (unlikely(old_p))
+		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */
+
+	arch_arm_kprobe(p);
+	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
+}
+
+static void __kprobes __disarm_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p;
+
+	unoptimize_kprobe(p);	/* Try to unoptimize */
+	arch_disarm_kprobe(p);
+
+	/* If another kprobe was blocked, optimize it. */
+	old_p = get_optimized_kprobe((unsigned long)p->addr);
+	if (unlikely(old_p))
+		optimize_kprobe(old_p);
+}
+
+#else /* !CONFIG_OPTPROBES */
+
+#define optimize_kprobe(p)			do {} while (0)
+#define unoptimize_kprobe(p)			do {} while (0)
+#define kill_optimized_kprobe(p)		do {} while (0)
+#define prepare_optimized_kprobe(p)		do {} while (0)
+#define try_to_optimize_kprobe(p)		do {} while (0)
+#define __arm_kprobe(p)				arch_arm_kprobe(p)
+#define __disarm_kprobe(p)			arch_disarm_kprobe(p)
+
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	kfree(p);
+}
+
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+}
+#endif /* CONFIG_OPTPROBES */
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	/*
+	 * Here, since __arm_kprobe() doesn't use stop_machine(),
+	 * this doesn't cause deadlock on text_mutex. So, we don't
+	 * need get_online_cpus().
+	 */
 	mutex_lock(&text_mutex);
-	arch_arm_kprobe(kp);
+	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 }
 
 /* Disarm a kprobe with text_mutex */
 static void __kprobes disarm_kprobe(struct kprobe *kp)
 {
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
 	mutex_lock(&text_mutex);
-	arch_disarm_kprobe(kp);
+	__disarm_kprobe(kp);
 	mutex_unlock(&text_mutex);
+	put_online_cpus();
 }
 
 /*
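Annotation: the optimizer batches requests. optimize_kprobe() only queues the probe on optimizing_list and schedules optimizing_work if no work is already pending, so probes registered within the OPTIMIZE_DELAY window are converted in a single pass, amortizing one synchronize_sched() wait and one text-patching session across all of them. A single-threaded user-space model of that coalescing; all names below are hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define OPTIMIZE_DELAY 5			/* ticks, as in the patch */

static int pending_count;
static bool work_scheduled;

static void queue_optimization(const char *name)
{
	pending_count++;			/* list_add() in the patch */
	if (!work_scheduled) {			/* delayed_work_pending() */
		work_scheduled = true;
		printf("scheduled one pass for %s (+%d ticks)\n",
		       name, OPTIMIZE_DELAY);
	}
}

static void optimizer_pass(void)		/* plays kprobe_optimizer() */
{
	printf("optimizing %d probes in one batch\n", pending_count);
	pending_count = 0;
	work_scheduled = false;
}

int main(void)
{
	queue_optimization("probe_a");
	queue_optimization("probe_b");	/* coalesced, no new schedule */
	optimizer_pass();
	return 0;
}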
@@ -392,7 +802,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 {
 	struct kprobe *kp;
-	if (p->pre_handler != aggr_pre_handler) {
+	if (!kprobe_aggrprobe(p)) {
 		p->nmissed++;
 	} else {
 		list_for_each_entry_rcu(kp, &p->list, list)
@@ -516,21 +926,16 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 }
 
 /*
- * Keep all fields in the kprobe consistent
- */
-static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
-{
-	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
-	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
-}
-
-/*
  * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
  */
 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
+
+	if (p->break_handler || p->post_handler)
+		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */
+
 	if (p->break_handler) {
 		if (ap->break_handler)
 			return -EEXIST;
@@ -545,7 +950,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			__arm_kprobe(ap);
 	}
 	return 0;
 }
@@ -554,12 +959,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
-static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	/* Copy p's insn slot to ap */
 	copy_kprobe(p, ap);
 	flush_insn_slot(ap);
 	ap->addr = p->addr;
-	ap->flags = p->flags;
+	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
 	/* We don't care the kprobe which has gone. */
@@ -569,8 +975,9 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
-	list_add_rcu(&p->list, &ap->list);
+	INIT_HLIST_NODE(&ap->hlist);
 
+	list_add_rcu(&p->list, &ap->list);
 	hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
@@ -584,12 +991,12 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	int ret = 0;
 	struct kprobe *ap = old_p;
 
-	if (old_p->pre_handler != aggr_pre_handler) {
-		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
-		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+	if (!kprobe_aggrprobe(old_p)) {
+		/* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
+		ap = alloc_aggr_kprobe(old_p);
 		if (!ap)
 			return -ENOMEM;
-		add_aggr_kprobe(ap, old_p);
+		init_aggr_kprobe(ap, old_p);
 	}
 
 	if (kprobe_gone(ap)) {
@@ -608,6 +1015,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			 */
 			return ret;
 
+		/* Prepare optimized instructions if possible. */
+		prepare_optimized_kprobe(ap);
+
 		/*
 		 * Clear gone flag to prevent allocating new slot again, and
 		 * set disabled flag because it is not armed yet.
@@ -616,6 +1026,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			  | KPROBE_FLAG_DISABLED;
 	}
 
+	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
 	return add_new_kprobe(ap, p);
 }
@@ -728,7 +1139,8 @@ int __kprobes register_kprobe(struct kprobe *p)
 
 	preempt_disable();
 	if (!kernel_text_address((unsigned long) p->addr) ||
-	    in_kprobes_functions((unsigned long) p->addr)) {
+	    in_kprobes_functions((unsigned long) p->addr) ||
+	    ftrace_text_reserved(p->addr, p->addr)) {
 		preempt_enable();
 		return -EINVAL;
 	}
@@ -765,27 +1177,34 @@ int __kprobes register_kprobe(struct kprobe *p)
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
+
+	get_online_cpus();	/* For avoiding text_mutex deadlock. */
+	mutex_lock(&text_mutex);
+
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
+		/* Since this may unoptimize old_p, locking text_mutex. */
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}
 
-	mutex_lock(&text_mutex);
 	ret = arch_prepare_kprobe(p);
 	if (ret)
-		goto out_unlock_text;
+		goto out;
 
 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		__arm_kprobe(p);
+
+	/* Try to optimize kprobe */
+	try_to_optimize_kprobe(p);
 
-out_unlock_text:
-	mutex_unlock(&text_mutex);
 out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);
 
 	if (probed_mod)
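Annotation: register_kprobe() now takes its locks in one fixed order, kprobe_mutex, then the cpu-hotplug read lock, then text_mutex, matching kprobe_optimizer(), so the hotplug path can never end up holding text_mutex while a text-patching path waits on stop_machine(). The general discipline, modeled with pthreads; this is an illustration of consistent lock ordering, not kernel code:

#include <pthread.h>
#include <stdio.h>

/* Two locks that every path must take in the same order; taking
 * them in opposite orders on two threads is the classic ABBA
 * deadlock the patch's comment describes for text_mutex vs.
 * cpu-hotplug. */
static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* ~kprobe_mutex */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* ~text_mutex */

static void register_path(void)
{
	pthread_mutex_lock(&outer);
	/* get_online_cpus() would pin CPU hotplug at this point */
	pthread_mutex_lock(&inner);
	puts("patching text under both locks");
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	register_path();
	return 0;
}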
@@ -807,7 +1226,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		return -EINVAL;
 
 	if (old_p == p ||
-	    (old_p->pre_handler == aggr_pre_handler &&
+	    (kprobe_aggrprobe(old_p) &&
 	     list_is_singular(&old_p->list))) {
 		/*
 		 * Only probe on the hash list. Disarm only if kprobes are
@@ -815,7 +1234,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * already have been removed. We save on flushing icache.
 		 */
 		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
-			disarm_kprobe(p);
+			disarm_kprobe(old_p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -831,8 +1250,13 @@ noclean:
 		list_del_rcu(&p->list);
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
-			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				disarm_kprobe(old_p);
+			if (!kprobes_all_disarmed) {
+				if (kprobe_disabled(old_p))
+					disarm_kprobe(old_p);
+				else
+					/* Try to optimize this probe again */
+					optimize_kprobe(old_p);
+			}
 		}
 	}
 	return 0;
@@ -849,7 +1273,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 		old_p = list_entry(p->list.next, struct kprobe, list);
 		list_del(&p->list);
 		arch_remove_kprobe(old_p);
-		kfree(old_p);
+		free_aggr_kprobe(old_p);
 	}
 }
 
@@ -1145,7 +1569,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 	struct kprobe *kp;
 
 	p->flags |= KPROBE_FLAG_GONE;
-	if (p->pre_handler == aggr_pre_handler) {
+	if (kprobe_aggrprobe(p)) {
 		/*
 		 * If this is an aggr_kprobe, we have to list all the
 		 * chained probes and mark them GONE.
@@ -1154,6 +1578,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 			kp->flags |= KPROBE_FLAG_GONE;
 		p->post_handler = NULL;
 		p->break_handler = NULL;
+		kill_optimized_kprobe(p);
 	}
 	/*
 	 * Here, we can remove insn_slot safely, because no thread calls
@@ -1263,6 +1688,15 @@ static int __init init_kprobes(void)
 		}
 	}
 
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+	/* Init kprobe_optinsn_slots */
+	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+	/* By default, kprobes can be optimized */
+	kprobes_allow_optimization = true;
+#endif
+
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;
 
@@ -1281,7 +1715,7 @@ static int __init init_kprobes(void)
 
 #ifdef CONFIG_DEBUG_FS
 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
-		const char *sym, int offset,char *modname)
+		const char *sym, int offset, char *modname, struct kprobe *pp)
 {
 	char *kprobe_type;
 
@@ -1291,19 +1725,21 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 		kprobe_type = "j";
 	else
 		kprobe_type = "k";
+
 	if (sym)
-		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+		seq_printf(pi, "%p %s %s+0x%x %s ",
 			p->addr, kprobe_type, sym, offset,
-			(modname ? modname : " "),
-			(kprobe_gone(p) ? "[GONE]" : ""),
-			((kprobe_disabled(p) && !kprobe_gone(p)) ?
-			 "[DISABLED]" : ""));
+			(modname ? modname : " "));
 	else
-		seq_printf(pi, "%p %s %p %s%s\n",
-			p->addr, kprobe_type, p->addr,
-			(kprobe_gone(p) ? "[GONE]" : ""),
-			((kprobe_disabled(p) && !kprobe_gone(p)) ?
-			 "[DISABLED]" : ""));
+		seq_printf(pi, "%p %s %p ",
+			p->addr, kprobe_type, p->addr);
+
+	if (!pp)
+		pp = p;
+	seq_printf(pi, "%s%s%s\n",
+		(kprobe_gone(p) ? "[GONE]" : ""),
+		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
+		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
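Annotation: report_probe() is split into two seq_printf() calls so the flags column is built once for both the symbolic and raw-address forms, and an aggregator's optimization state (pp) is reported on each chained probe's line. Assembling those format strings, a debugfs list entry for an optimized probe could plausibly look like the following; the address and symbol are invented for illustration:

c042a1b0 k do_fork+0x0   [OPTIMIZED]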
@@ -1339,11 +1775,11 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 	hlist_for_each_entry_rcu(p, node, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
-		if (p->pre_handler == aggr_pre_handler) {
+		if (kprobe_aggrprobe(p)) {
 			list_for_each_entry_rcu(kp, &p->list, list)
-				report_probe(pi, kp, sym, offset, modname);
+				report_probe(pi, kp, sym, offset, modname, p);
 		} else
-			report_probe(pi, p, sym, offset, modname);
+			report_probe(pi, p, sym, offset, modname, NULL);
 	}
 	preempt_enable();
 	return 0;
@@ -1421,12 +1857,13 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 		goto out;
 	}
 
-	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arm_kprobe(p);
-
-	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
 		kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+		p->flags &= ~KPROBE_FLAG_DISABLED;
+		arm_kprobe(p);
+	}
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1446,12 +1883,13 @@ static void __kprobes arm_all_kprobes(void)
 	if (!kprobes_all_disarmed)
 		goto already_enabled;
 
+	/* Arming kprobes doesn't optimize kprobe itself */
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				arch_arm_kprobe(p);
+				__arm_kprobe(p);
 	}
 	mutex_unlock(&text_mutex);
 
@@ -1478,16 +1916,23 @@ static void __kprobes disarm_all_kprobes(void)
 
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
+
+	/*
+	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
+	 * because disarming may also unoptimize kprobes.
+	 */
+	get_online_cpus();
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				arch_disarm_kprobe(p);
+				__disarm_kprobe(p);
 		}
 	}
 
 	mutex_unlock(&text_mutex);
+	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
