author     Tejun Heo <tj@kernel.org>  2009-10-29 09:34:14 -0400
committer  Tejun Heo <tj@kernel.org>  2009-10-29 09:34:14 -0400
commit     390dfd95c5df1ab3921dd388d11b2aee332c3f2c
tree       8d12e22961716a0137b9e41ed00e2521b88fecce
parent     877105cc49f6e6ad32e3d63a214e8f537c0339ef
percpu: make misc percpu symbols unique
This patch updates misc percpu-related symbols so that percpu symbols
are unique and don't clash with local symbols.  This serves two
purposes: it decreases the possibility of global percpu symbol
collisions, and it allows the per_cpu__ prefix to be dropped from
percpu symbols.
* drivers/crypto/padlock-aes.c: s/last_cword/paes_last_cword/
* drivers/lguest/x86/core.c: s/last_cpu/lg_last_cpu/
* drivers/s390/net/netiucv.c: rename the variable used in a macro to
avoid clashing with percpu symbol
* arch/mn10300/kernel/kprobes.c: replace current_ prefix with cur_ for
static variables. Please note that percpu symbol current_kprobe
can't be changed as it's used by generic code.
Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.
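
To make the clash concrete, here is a minimal user-space sketch
(illustrative only, not part of the patch; DEFINE_FAKE_PERCPU merely
stands in for the kernel's DEFINE_PER_CPU name mangling).  With the
per_cpu__ prefix, a percpu definition and a file-local variable of the
same name coexist in one translation unit, exactly as mn10300's
kprobes.c does with current_kprobe; once the prefix is dropped they
would collide, hence the renames above.

#include <stdio.h>

/* Stand-in for the old prefixing behaviour of DEFINE_PER_CPU():
 * the emitted identifier is per_cpu__<name>, not <name> itself. */
#define DEFINE_FAKE_PERCPU(type, name) type per_cpu__##name

static int current_kprobe;               /* file-local, like the mn10300 statics */
DEFINE_FAKE_PERCPU(int, current_kprobe); /* expands to per_cpu__current_kprobe: no clash */

/* If the macro stopped adding the prefix, i.e.
 *   #define DEFINE_FAKE_PERCPU(type, name) type name
 * the line above would redefine "current_kprobe" and the build would
 * fail -- which is what the cur_kprobe_* renames avoid. */

int main(void)
{
	printf("%d %d\n", current_kprobe, per_cpu__current_kprobe);
	return 0;
}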
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Chuck Ebbert <cebbert@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
 arch/mn10300/kernel/kprobes.c | 61
 drivers/crypto/padlock-aes.c  | 12
 drivers/lguest/x86/core.c     |  6
 drivers/s390/net/netiucv.c    |  8
 4 files changed, 42 insertions(+), 45 deletions(-)
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index dacafab00eb2..67e6389d625a 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 #define KPROBE_HIT_ACTIVE	0x00000001
 #define KPROBE_HIT_SS		0x00000002
 
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
 static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long nextpc;
 
-	current_kprobe_orig_pc = regs->pc;
-	memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
-	regs->pc = (unsigned long) current_kprobe_ss_buf;
+	cur_kprobe_orig_pc = regs->pc;
+	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+	regs->pc = (unsigned long) cur_kprobe_ss_buf;
 
-	nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
-	if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
-		current_kprobe_next_pc =
-			current_kprobe_orig_pc + (nextpc - regs->pc);
+	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
 	else
-		current_kprobe_next_pc = nextpc;
+		cur_kprobe_next_pc = nextpc;
 
 	/* branching instructions need special handling */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
 		nextpc = singlestep_branch_setup(regs);
 
-	current_kprobe_bp_addr = nextpc;
+	cur_kprobe_bp_addr = nextpc;
 
 	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
-	mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
-				    sizeof(current_kprobe_ss_buf));
+	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+				    sizeof(cur_kprobe_ss_buf));
 	mn10300_icache_inv();
 }
 
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 			disarm_kprobe(p, regs);
 			ret = 1;
 		} else {
-			p = current_kprobe;
+			p = cur_kprobe;
 			if (p->break_handler && p->break_handler(p, regs))
 				goto ss_probe;
 		}
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 	}
 
 	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
+	cur_kprobe = p;
 	if (p->pre_handler(p, regs)) {
 		/* handler has already set things up, so skip ss setup */
 		return 1;
@@ -491,8 +490,8 @@ no_kprobe:
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	/* we may need to fixup regs/stack after singlestepping a call insn */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
-		regs->pc = current_kprobe_orig_pc;
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+		regs->pc = cur_kprobe_orig_pc;
 		switch (p->ainsn.insn[0]) {
 		case 0xcd:	/* CALL (d16,PC) */
 			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 		}
 	}
 
-	regs->pc = current_kprobe_next_pc;
-	current_kprobe_bp_addr = 0;
+	regs->pc = cur_kprobe_next_pc;
+	cur_kprobe_bp_addr = 0;
 }
 
 static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	if (!kprobe_running())
 		return 0;
 
-	if (current_kprobe->post_handler)
-		current_kprobe->post_handler(current_kprobe, regs, 0);
+	if (cur_kprobe->post_handler)
+		cur_kprobe->post_handler(cur_kprobe, regs, 0);
 
-	resume_execution(current_kprobe, regs);
+	resume_execution(cur_kprobe, regs);
 	reset_current_kprobe();
 	preempt_enable_no_resched();
 	return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 static inline
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-	if (current_kprobe->fault_handler &&
-	    current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+	if (cur_kprobe->fault_handler &&
+	    cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
 		return 1;
 
 	if (kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(current_kprobe, regs);
+		resume_execution(cur_kprobe, regs);
 		reset_current_kprobe();
 		preempt_enable_no_resched();
 	}
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 
 	switch (val) {
 	case DIE_BREAKPOINT:
-		if (current_kprobe_bp_addr != args->regs->pc) {
+		if (cur_kprobe_bp_addr != args->regs->pc) {
 			if (kprobe_handler(args->regs))
 				return NOTIFY_STOP;
 		} else {
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index a9952b1236b0..721d004a0235 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -64,7 +64,7 @@ struct aes_ctx {
 	u32 *D;
 };
 
-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
 
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 ok:
 	for_each_online_cpu(cpu)
-		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
-		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
-			per_cpu(last_cword, cpu) = NULL;
+		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+			per_cpu(paes_last_cword, cpu) = NULL;
 
 	return 0;
 }
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
 {
 	int cpu = raw_smp_processor_id();
 
-	if (cword != per_cpu(last_cword, cpu))
+	if (cword != per_cpu(paes_last_cword, cpu))
 #ifndef CONFIG_X86_64
 		asm volatile ("pushfl; popfl");
 #else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
 
 static inline void padlock_store_cword(struct cword *cword)
 {
-	per_cpu(last_cword, raw_smp_processor_id()) = cword;
+	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
 }
 
 /*
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6ae388849a3b..fb2b7ef7868e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
 		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
 }
 
-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
 
 /*S:010
  * We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
 	 * meanwhile). If that's not the case, we pretend everything in the
 	 * Guest has changed.
 	 */
-	if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
-		__get_cpu_var(last_cpu) = cpu;
+	if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+		__get_cpu_var(lg_last_cpu) = cpu;
 		cpu->last_pages = pages;
 		cpu->changed = CHANGED_ALL;
 	}
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..14e61441ba0b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
 #define IUCV_DBF_TEXT_(name, level, text...) \
 	do { \
 		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
-			char* iucv_dbf_txt_buf = \
-					get_cpu_var(iucv_dbf_txt_buf); \
-			sprintf(iucv_dbf_txt_buf, text); \
-			debug_text_event(iucv_dbf_##name, level, \
-					iucv_dbf_txt_buf); \
+			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+			sprintf(__buf, text); \
+			debug_text_event(iucv_dbf_##name, level, __buf); \
 			put_cpu_var(iucv_dbf_txt_buf); \
 		} \
 	} while (0)
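
The netiucv hunk above addresses a second flavour of the same problem:
the macro-local buffer shared its name with the percpu symbol it
fetches, which only works while get_cpu_var() mangles the name.  A
minimal user-space sketch of that shadowing (hypothetical names and a
stand-in accessor, not the kernel API):

#include <stdio.h>

char per_cpu__dbf_buf[32] = "per-cpu debug buffer";

/* Old accessor behaviour: the name is mangled, so a local that reuses
 * the "logical" name still reaches the underlying symbol. */
#define get_var_prefixed(name) per_cpu__##name

int main(void)
{
	/* Fine today: expands to per_cpu__dbf_buf. */
	char *dbf_buf = get_var_prefixed(dbf_buf);
	puts(dbf_buf);

	/* With a prefix-less accessor, e.g.
	 *   #define get_var_plain(name) name
	 * the same pattern would expand to
	 *   char *dbf_buf = dbf_buf;
	 * so the local would initialize itself and the per-cpu buffer
	 * would never be reached -- which is why the macro's local is
	 * renamed to __buf in the patch. */
	return 0;
}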