Diffstat (limited to 'kernel')
-rw-r--r--   kernel/jump_label.c   77
-rw-r--r--   kernel/kprobes.c      26
2 files changed, 82 insertions, 21 deletions
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 7be868bf25c6..3b79bd938330 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -39,6 +39,16 @@ struct jump_label_module_entry {
         struct module *mod;
 };
 
+void jump_label_lock(void)
+{
+        mutex_lock(&jump_label_mutex);
+}
+
+void jump_label_unlock(void)
+{
+        mutex_unlock(&jump_label_mutex);
+}
+
 static int jump_label_cmp(const void *a, const void *b)
 {
         const struct jump_entry *jea = a;
@@ -152,7 +162,7 @@ void jump_label_update(unsigned long key, enum jump_label_type type)
         struct jump_label_module_entry *e_module;
         int count;
 
-        mutex_lock(&jump_label_mutex);
+        jump_label_lock();
         entry = get_jump_label_entry((jump_label_t)key);
         if (entry) {
                 count = entry->nr_entries;
@@ -168,13 +178,14 @@ void jump_label_update(unsigned long key, enum jump_label_type type)
                         count = e_module->nr_entries;
                         iter = e_module->table;
                         while (count--) {
-                                if (kernel_text_address(iter->code))
+                                if (iter->key &&
+                                    kernel_text_address(iter->code))
                                         arch_jump_label_transform(iter, type);
                                 iter++;
                         }
                 }
         }
-        mutex_unlock(&jump_label_mutex);
+        jump_label_unlock();
 }
 
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
@@ -231,6 +242,7 @@ out:
  * overlaps with any of the jump label patch addresses. Code
  * that wants to modify kernel text should first verify that
  * it does not overlap with any of the jump label addresses.
+ * Caller must hold jump_label_mutex.
  *
  * returns 1 if there is an overlap, 0 otherwise
  */
@@ -241,7 +253,6 @@ int jump_label_text_reserved(void *start, void *end)
         struct jump_entry *iter_stop = __start___jump_table;
         int conflict = 0;
 
-        mutex_lock(&jump_label_mutex);
         iter = iter_start;
         while (iter < iter_stop) {
                 if (addr_conflict(iter, start, end)) {
@@ -256,10 +267,16 @@ int jump_label_text_reserved(void *start, void *end)
         conflict = module_conflict(start, end);
 #endif
 out:
-        mutex_unlock(&jump_label_mutex);
         return conflict;
 }
 
+/*
+ * Not all archs need this.
+ */
+void __weak arch_jump_label_text_poke_early(jump_label_t addr)
+{
+}
+
 static __init int init_jump_label(void)
 {
         int ret;
@@ -267,7 +284,7 @@ static __init int init_jump_label(void)
         struct jump_entry *iter_stop = __stop___jump_table;
         struct jump_entry *iter;
 
-        mutex_lock(&jump_label_mutex);
+        jump_label_lock();
         ret = build_jump_label_hashtable(__start___jump_table,
                                          __stop___jump_table);
         iter = iter_start;
@@ -275,7 +292,7 @@ static __init int init_jump_label(void)
                 arch_jump_label_text_poke_early(iter->code);
                 iter++;
         }
-        mutex_unlock(&jump_label_mutex);
+        jump_label_unlock();
         return ret;
 }
 early_initcall(init_jump_label);
@@ -366,6 +383,39 @@ static void remove_jump_label_module(struct module *mod)
         }
 }
 
+static void remove_jump_label_module_init(struct module *mod)
+{
+        struct hlist_head *head;
+        struct hlist_node *node, *node_next, *module_node, *module_node_next;
+        struct jump_label_entry *e;
+        struct jump_label_module_entry *e_module;
+        struct jump_entry *iter;
+        int i, count;
+
+        /* if the module doesn't have jump label entries, just return */
+        if (!mod->num_jump_entries)
+                return;
+
+        for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
+                head = &jump_label_table[i];
+                hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
+                        hlist_for_each_entry_safe(e_module, module_node,
+                                                  module_node_next,
+                                                  &(e->modules), hlist) {
+                                if (e_module->mod != mod)
+                                        continue;
+                                count = e_module->nr_entries;
+                                iter = e_module->table;
+                                while (count--) {
+                                        if (within_module_init(iter->code, mod))
+                                                iter->key = 0;
+                                        iter++;
+                                }
+                        }
+                }
+        }
+}
+
 static int
 jump_label_module_notify(struct notifier_block *self, unsigned long val,
                          void *data)
@@ -375,16 +425,21 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 
         switch (val) {
         case MODULE_STATE_COMING:
-                mutex_lock(&jump_label_mutex);
+                jump_label_lock();
                 ret = add_jump_label_module(mod);
                 if (ret)
                         remove_jump_label_module(mod);
-                mutex_unlock(&jump_label_mutex);
+                jump_label_unlock();
                 break;
         case MODULE_STATE_GOING:
-                mutex_lock(&jump_label_mutex);
+                jump_label_lock();
                 remove_jump_label_module(mod);
-                mutex_unlock(&jump_label_mutex);
+                jump_label_unlock();
+                break;
+        case MODULE_STATE_LIVE:
+                jump_label_lock();
+                remove_jump_label_module_init(mod);
+                jump_label_unlock();
                 break;
         }
         return ret;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 99865c33a60d..9737a76e106f 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1145,14 +1145,13 @@ int __kprobes register_kprobe(struct kprobe *p)
         if (ret)
                 return ret;
 
+        jump_label_lock();
         preempt_disable();
         if (!kernel_text_address((unsigned long) p->addr) ||
             in_kprobes_functions((unsigned long) p->addr) ||
             ftrace_text_reserved(p->addr, p->addr) ||
-            jump_label_text_reserved(p->addr, p->addr)) {
-                preempt_enable();
-                return -EINVAL;
-        }
+            jump_label_text_reserved(p->addr, p->addr))
+                goto fail_with_jump_label;
 
         /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
         p->flags &= KPROBE_FLAG_DISABLED;
@@ -1166,10 +1165,9 @@ int __kprobes register_kprobe(struct kprobe *p)
                  * We must hold a refcount of the probed module while updating
                  * its code to prohibit unexpected unloading.
                  */
-                if (unlikely(!try_module_get(probed_mod))) {
-                        preempt_enable();
-                        return -EINVAL;
-                }
+                if (unlikely(!try_module_get(probed_mod)))
+                        goto fail_with_jump_label;
+
                 /*
                  * If the module freed .init.text, we couldn't insert
                  * kprobes in there.
@@ -1177,16 +1175,18 @@ int __kprobes register_kprobe(struct kprobe *p)
                 if (within_module_init((unsigned long)p->addr, probed_mod) &&
                     probed_mod->state != MODULE_STATE_COMING) {
                         module_put(probed_mod);
-                        preempt_enable();
-                        return -EINVAL;
+                        goto fail_with_jump_label;
                 }
         }
         preempt_enable();
+        jump_label_unlock();
 
         p->nmissed = 0;
         INIT_LIST_HEAD(&p->list);
         mutex_lock(&kprobe_mutex);
 
+        jump_label_lock(); /* needed to call jump_label_text_reserved() */
+
         get_online_cpus();        /* For avoiding text_mutex deadlock. */
         mutex_lock(&text_mutex);
 
@@ -1214,12 +1214,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 out:
         mutex_unlock(&text_mutex);
         put_online_cpus();
+        jump_label_unlock();
         mutex_unlock(&kprobe_mutex);
 
         if (probed_mod)
                 module_put(probed_mod);
 
         return ret;
+
+fail_with_jump_label:
+        preempt_enable();
+        jump_label_unlock();
+        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
 