author     Tony Luck <tony.luck@intel.com>   2008-04-17 13:14:51 -0400
committer  Tony Luck <tony.luck@intel.com>   2008-04-17 13:14:51 -0400
commit     71b264f85ff50c14fe945ffff06ae0d5e9a9124e (patch)
tree       9fd79c63fd630c4d030a97d254d42a3a73f1328b /arch/ia64/kernel
parent     f4df39cbdd9e9ab615e80148cc271db22a8508ad (diff)
parent     072f042df335d7e0da2027637bcf720d7ff1589b (diff)
Pull miscellaneous into release branch
Conflicts:
arch/ia64/kernel/mca.c
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/asm-offsets.c |   7
-rw-r--r--  arch/ia64/kernel/crash.c       |  56
-rw-r--r--  arch/ia64/kernel/fsys.S        |  34
-rw-r--r--  arch/ia64/kernel/irq_ia64.c    |   2
-rw-r--r--  arch/ia64/kernel/kprobes.c     | 133
-rw-r--r--  arch/ia64/kernel/mca.c         |  11
-rw-r--r--  arch/ia64/kernel/perfmon.c     |   4
-rw-r--r--  arch/ia64/kernel/setup.c       |  23
-rw-r--r--  arch/ia64/kernel/smpboot.c     |   2
-rw-r--r--  arch/ia64/kernel/unaligned.c   |   3
10 files changed, 214 insertions, 61 deletions
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 5865130b0a92..230a6f92367f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@
 #define ASM_OFFSETS_C 1
 
 #include <linux/sched.h>
+#include <linux/pid.h>
 #include <linux/clocksource.h>
 
 #include <asm-ia64/processor.h>
@@ -34,6 +35,9 @@ void foo(void)
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
+	BUILD_BUG_ON(sizeof(struct upid) != 32);
+	DEFINE(IA64_UPID_SHIFT, 5);
+
 	BLANK();
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -51,6 +55,9 @@ void foo(void)
 	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
+	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
+	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
 	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
 	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
 	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
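
The two additions to foo() work as a pair: IA64_UPID_SHIFT lets assembly code index pid->numbers[] with a shift instead of a multiply, and the BUILD_BUG_ON pins the 32-byte struct upid size that a shift count of 5 assumes. A one-line C sketch of that invariant (not from the patch, written against the 2.6.25 <linux/pid.h> definitions):

    /* Sketch only: the assumption behind IA64_UPID_SHIFT == 5 is that
     * pid->numbers[] has a 32-byte stride, so numbers[level] starts
     * (level << 5) bytes past numbers[0]. */
    BUILD_BUG_ON(sizeof(struct upid) != (1 << 5));
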
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index fbe742ad2fde..90ef338cf46f 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -24,6 +24,7 @@ int kdump_status[NR_CPUS];
 static atomic_t kdump_cpu_frozen;
 atomic_t kdump_in_progress;
 static int kdump_on_init = 1;
+static int kdump_on_fatal_mca = 1;
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -118,6 +119,7 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	crash_save_vmcoreinfo();
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -148,7 +150,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	struct ia64_mca_notify_die *nd;
 	struct die_args *args = data;
 
-	if (!kdump_on_init)
+	if (!kdump_on_init && !kdump_on_fatal_mca)
 		return NOTIFY_DONE;
 
 	if (!ia64_kimage) {
@@ -173,32 +175,38 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 		return NOTIFY_DONE;
 
 	switch (val) {
 	case DIE_INIT_MONARCH_PROCESS:
-		atomic_set(&kdump_in_progress, 1);
-		*(nd->monarch_cpu) = -1;
-		break;
+		if (kdump_on_init) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+		}
+		break;
 	case DIE_INIT_MONARCH_LEAVE:
-		machine_kdump_on_init();
+		if (kdump_on_init)
+			machine_kdump_on_init();
 		break;
 	case DIE_INIT_SLAVE_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_RENDZVOUS_LEAVE:
 		if (atomic_read(&kdump_in_progress))
 			unw_init_running(kdump_cpu_freeze, NULL);
 		break;
 	case DIE_MCA_MONARCH_LEAVE:
 		/* die_register->signr indicate if MCA is recoverable */
-		if (!args->signr)
-			machine_kdump_on_init();
-		break;
+		if (kdump_on_fatal_mca && !args->signr) {
+			atomic_set(&kdump_in_progress, 1);
+			*(nd->monarch_cpu) = -1;
+			machine_kdump_on_init();
+		}
+		break;
 	}
 	return NOTIFY_DONE;
 }
 
 #ifdef CONFIG_SYSCTL
-static ctl_table kdump_on_init_table[] = {
+static ctl_table kdump_ctl_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "kdump_on_init",
@@ -207,6 +215,14 @@ static ctl_table kdump_on_init_table[] = {
 		.mode = 0644,
 		.proc_handler = &proc_dointvec,
 	},
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_fatal_mca",
+		.data = &kdump_on_fatal_mca,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
 	{ .ctl_name = 0 }
 };
 
@@ -215,7 +231,7 @@ static ctl_table sys_table[] = {
 		.ctl_name = CTL_KERN,
 		.procname = "kernel",
 		.mode = 0555,
-		.child = kdump_on_init_table,
+		.child = kdump_ctl_table,
 	},
 	{ .ctl_name = 0 }
 };
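
The new table entry mirrors kdump_on_init and is registered under the CTL_KERN ("kernel") directory with proc_dointvec, so it should surface as /proc/sys/kernel/kdump_on_fatal_mca with a default of 1 (take a crash dump on a fatal MCA). A minimal user-space sketch of turning the knob off at run time, assuming that path:

    /* Sketch only: disable kdump-on-fatal-MCA via the new sysctl, assuming
     * the usual procfs view of the kernel sysctl directory. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/kdump_on_fatal_mca", "w");

            if (!f)
                    return 1;
            fputs("0\n", f);        /* 0 = do not take a crash dump on a fatal MCA */
            return fclose(f) ? 1 : 0;
    }
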
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 357b7e2adc63..c1625c7e1779 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -61,13 +61,29 @@ ENTRY(fsys_getpid)
 	.prologue
 	.altrp b6
 	.body
+	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+	;;
+	ld8 r17=[r17]				// r17 = current->group_leader
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
 	;;
 	ld4 r9=[r9]
-	add r8=IA64_TASK_TGID_OFFSET,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r17
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	ld4 r8=[r8]				// r8 = current->tgid
+	ld8 r17=[r17]				// r17 = current->group_leader->pids[PIDTYPE_PID].pid
+	;;
+	add r8=IA64_PID_LEVEL_OFFSET,r17
+	;;
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
+	mov r17=0
 	;;
 	cmp.ne p8,p0=0,r9
 (p8)	br.spnt.many fsys_fallback_syscall
@@ -126,15 +142,25 @@ ENTRY(fsys_set_tid_address)
 	.altrp b6
 	.body
 	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+	add r17=IA64_TASK_TGIDLINK_OFFSET,r16
 	;;
 	ld4 r9=[r9]
 	tnat.z p6,p7=r32			// check argument register for being NaT
+	ld8 r17=[r17]				// r17 = current->pids[PIDTYPE_PID].pid
 	;;
 	and r9=TIF_ALLWORK_MASK,r9
-	add r8=IA64_TASK_PID_OFFSET,r16
+	add r8=IA64_PID_LEVEL_OFFSET,r17
 	add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
 	;;
-	ld4 r8=[r8]
+	ld4 r8=[r8]				// r8 = pid->level
+	add r17=IA64_PID_UPID_OFFSET,r17	// r17 = &pid->numbers[0]
+	;;
+	shl r8=r8,IA64_UPID_SHIFT
+	;;
+	add r17=r17,r8				// r17 = &pid->numbers[pid->level]
+	;;
+	ld4 r8=[r17]				// r8 = pid->numbers[pid->level].nr
+	;;
 	cmp.ne p8,p0=0,r9
 	mov r17=-1
 	;;
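
The register comments above spell out the pointer chase that the rewritten fast paths perform. In C it corresponds roughly to the following sketch (the helper name is invented for illustration and the field names follow the 2.6.25 struct task_struct and struct pid; the real implementation is the assembly above):

    /* Sketch only: C rendering of the fsys_getpid fast path above. */
    static inline pid_t fsys_getpid_sketch(struct task_struct *task)
    {
            struct pid *pid = task->group_leader->pids[PIDTYPE_PID].pid;

            /* IA64_PID_UPID_OFFSET plus the level-scaled index computed above */
            return pid->numbers[pid->level].nr;
    }

fsys_set_tid_address does the same walk starting from current rather than the group leader.
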
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index d8be23fbe6bc..5538471e8d68 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -472,7 +472,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			static unsigned char count;
 			static long last_time;
 
-			if (jiffies - last_time > 5*HZ)
+			if (time_after(jiffies, last_time + 5 * HZ))
 				count = 0;
 			if (++count < 5) {
 				last_time = jiffies;
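
time_after() expresses the same five-second rate limit as a signed comparison of the difference between two jiffies values, so the test keeps giving the intended answer when the counter wraps, and it reads as a time comparison rather than raw arithmetic; the same conversion appears in mca.c and unaligned.c below. A standalone sketch of the macro's behaviour (simplified from <linux/jiffies.h>, with the typecheck() guards dropped):

    #include <stdio.h>

    /* Simplified form of the kernel macro; the real one also type-checks its arguments. */
    #define time_after(a, b) ((long)((b) - (a)) < 0)

    int main(void)
    {
            unsigned long last_time = (unsigned long)-2;    /* just before the counter wraps */
            unsigned long now = 3;                          /* five ticks later, after the wrap */

            printf("%d\n", time_after(now, last_time + 5));         /* 0: deadline not yet passed */
            printf("%d\n", time_after(now + 100, last_time + 5));   /* 1: deadline passed */
            return 0;
    }
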
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 8d9a446a0d17..233434f4f88f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -78,6 +78,20 @@ static enum instruction_type bundle_encoding[32][3] = {
 	{ u, u, u },			/* 1F */
 };
 
+/* Insert a long branch code */
+static void __kprobes set_brl_inst(void *from, void *to)
+{
+	s64 rel = ((s64) to - (s64) from) >> 4;
+	bundle_t *brl;
+	brl = (bundle_t *) ((u64) from & ~0xf);
+	brl->quad0.template = 0x05;	/* [MLX](stop) */
+	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
+	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
+	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
+	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
+	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
+}
+
 /*
  * In this function we check to see if the instruction
  * is IP relative instruction and update the kprobe
@@ -496,6 +510,77 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
 }
 
+/* Check the instruction in the slot is break */
+static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+{
+	unsigned int major_opcode;
+	unsigned int template = bundle->quad0.template;
+	unsigned long kprobe_inst;
+
+	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
+	if (slot == 1 && bundle_encoding[template][1] == L)
+		slot++;
+
+	/* Get Kprobe probe instruction at given slot*/
+	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+	/* For break instruction,
+	 * Bits 37:40 Major opcode to be zero
+	 * Bits 27:32 X6 to be zero
+	 * Bits 32:35 X3 to be zero
+	 */
+	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
+		/* Not a break instruction */
+		return 0;
+	}
+
+	/* Is a break instruction */
+	return 1;
+}
+
+/*
+ * In this function, we check whether the target bundle modifies IP or
+ * it triggers an exception. If so, it cannot be boostable.
+ */
+static int __kprobes can_boost(bundle_t *bundle, uint slot,
+			       unsigned long bundle_addr)
+{
+	unsigned int template = bundle->quad0.template;
+
+	do {
+		if (search_exception_tables(bundle_addr + slot) ||
+		    __is_ia64_break_inst(bundle, slot))
+			return 0;	/* exception may occur in this bundle*/
+	} while ((++slot) < 3);
+	template &= 0x1e;
+	if (template >= 0x10 /* including B unit */ ||
+	    template == 0x04 /* including X unit */ ||
+	    template == 0x06) /* undefined */
+		return 0;
+
+	return 1;
+}
+
+/* Prepare long jump bundle and disables other boosters if need */
+static void __kprobes prepare_booster(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
+	unsigned int slot = (unsigned long)p->addr & 0xf;
+	struct kprobe *other_kp;
+
+	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
+		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
+		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
+	}
+
+	/* disables boosters in previous slots */
+	for (; addr < (unsigned long)p->addr; addr++) {
+		other_kp = get_kprobe((void *)addr);
+		if (other_kp)
+			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
+	}
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long) p->addr;
@@ -530,6 +615,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
+	prepare_booster(p);
+
 	return 0;
 }
 
@@ -543,7 +630,9 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	src = &p->opcode.bundle;
 
 	flush_icache_range((unsigned long)p->ainsn.insn,
-			   (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+			   (unsigned long)p->ainsn.insn +
+			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);
+
 	switch (p->ainsn.slot) {
 	case 0:
 		dest->quad0.slot0 = src->quad0.slot0;
@@ -584,13 +673,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
+	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
- * located in the kprobe (p->ainsn.insn.bundle). We still need to adjust
+ * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust
  * the ip to point back to the original stack address. To set the IP address
  * to original stack address, handle the case where we need to fixup the
  * relative IP address and/or fixup branch register.
@@ -607,7 +696,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
 
-	if (p->ainsn.inst_flag) {
+	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {
 
 		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
 			/* Fix relative IP address */
@@ -686,33 +775,12 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
 {
 	unsigned int slot = ia64_psr(regs)->ri;
-	unsigned int template, major_opcode;
-	unsigned long kprobe_inst;
 	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
 	bundle_t bundle;
 
 	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
-	template = bundle.quad0.template;
-
-	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
-	if (slot == 1 && bundle_encoding[template][1] == L)
-		slot++;
 
-	/* Get Kprobe probe instruction at given slot*/
-	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
-
-	/* For break instruction,
-	 * Bits 37:40 Major opcode to be zero
-	 * Bits 27:32 X6 to be zero
-	 * Bits 32:35 X3 to be zero
-	 */
-	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
-		/* Not a break instruction */
-		return 0;
-	}
-
-	/* Is a break instruction */
-	return 1;
+	return __is_ia64_break_inst(&bundle, slot);
 }
 
 static int __kprobes pre_kprobes_handler(struct die_args *args)
@@ -802,6 +870,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	return 1;
 
 ss_probe:
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
+	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
+		/* Boost up -- we can execute copied instructions directly */
+		ia64_psr(regs)->ri = p->ainsn.slot;
+		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
+		/* turn single stepping off */
+		ia64_psr(regs)->ss = 0;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+		return 1;
+	}
+#endif
 	prepare_ss(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
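
Taken together, the kprobes changes add a simple booster: arch_prepare_kprobe() now asks can_boost() whether the copied bundle can run out of line without branching or faulting, and prepare_booster() plants a long branch after the copy. A sketch of the resulting layout (orientation only, not code from the patch):

    /*
     * Slot layout prepare_booster() sets up (sketch; ia64 bundles are 16 bytes):
     *
     *   p->ainsn.insn[0].bundle   copy of the probed bundle
     *   p->ainsn.insn[1].bundle   brl back to the bundle following p->addr
     *
     * With INST_FLAG_BOOSTABLE set and no post_handler, pre_kprobes_handler()
     * points cr_iip at insn[0] and clears psr.ss, so the probe resumes through
     * the brl instead of taking the single-step trap that prepare_ss() arms.
     */
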
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 607006a6a976..e51bced3b0fa 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -69,6 +69,7 @@
  * 2007-04-27 Russ Anderson <rja@sgi.com>
  *	Support multiple cpus going through OS_MCA in the same event.
  */
+#include <linux/jiffies.h>
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -295,7 +296,8 @@ static void ia64_mlogbuf_dump_from_init(void)
 	if (mlogbuf_finished)
 		return;
 
-	if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+	if (mlogbuf_timestamp &&
+	    time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
 		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
 			" and the system seems to be messed up.\n");
 		ia64_mlogbuf_finish(0);
@@ -1311,20 +1313,17 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
-#ifdef CONFIG_KEXEC
-		atomic_set(&kdump_in_progress, 1);
-		monarch_cpu = -1;
-#endif
 	}
+
 	if (__get_cpu_var(ia64_mca_tr_reload)) {
 		mca_insert_tr(0x1); /*Reload dynamic itrs*/
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
+
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__func__);
 
-
 	if (atomic_dec_return(&mca_count) > 0) {
 		int i;
 
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index a2aabfdc80d9..d1d24f4598da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -4204,10 +4204,10 @@ pfm_check_task_exist(pfm_context_t *ctx)
 	do_each_thread (g, t) {
 		if (t->thread.pfm_context == ctx) {
 			ret = 0;
-			break;
+			goto out;
 		}
 	} while_each_thread (g, t);
-
+out:
 	read_unlock(&tasklist_lock);
 
 	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index b86a072418a2..5015ca1275ca 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -177,6 +177,29 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
 	return 0;
 }
 
+/*
+ * Similar to "filter_rsvd_memory()", but the reserved memory ranges
+ * are not filtered out.
+ */
+int __init
+filter_memory(unsigned long start, unsigned long end, void *arg)
+{
+	void (*func)(unsigned long, unsigned long, int);
+
+#if IGNORE_PFN0
+	if (start == PAGE_OFFSET) {
+		printk(KERN_WARNING "warning: skipping physical page 0\n");
+		start += PAGE_SIZE;
+		if (start >= end)
+			return 0;
+	}
+#endif
+	func = arg;
+	if (start < end)
+		call_pernode_memory(__pa(start), end - start, func);
+	return 0;
+}
+
 static void __init
 sort_regions (struct rsvd_region *rsvd_region, int max)
 {
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 32ee5979a042..16483be18c0b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -400,9 +400,9 @@ smp_callin (void)
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
-	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
+	unlock_ipi_calllock();
 
 	smp_setup_percpu_timer();
 
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6903361d11a5..ff0e7c10faa7 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -13,6 +13,7 @@
  * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes.
  * 2001/01/17 Add support emulation of unaligned kernel accesses.
  */
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
@@ -1290,7 +1291,7 @@ within_logging_rate_limit (void)
 {
 	static unsigned long count, last_time;
 
-	if (jiffies - last_time > 5*HZ)
+	if (time_after(jiffies, last_time + 5 * HZ))
 		count = 0;
 	if (count < 5) {
 		last_time = jiffies;