Diffstat (limited to 'arch')

 -rw-r--r--  arch/ia64/hp/sim/simserial.c      |   2
 -rw-r--r--  arch/ia64/kernel/Makefile         |   1
 -rw-r--r--  arch/ia64/kernel/crash.c          |  22
 -rw-r--r--  arch/ia64/kernel/crash_dump.c     |  48
 -rw-r--r--  arch/ia64/kernel/jprobes.S        |   3
 -rw-r--r--  arch/ia64/kernel/kprobes.c        | 226
 -rw-r--r--  arch/ia64/kernel/machine_kexec.c  |   7
 -rw-r--r--  arch/ia64/kernel/mca.c            |   2
 -rw-r--r--  arch/ia64/kernel/setup.c          |  33
 -rw-r--r--  arch/ia64/kernel/smp.c            |   4
 -rw-r--r--  arch/ia64/kernel/traps.c          |  50
 -rw-r--r--  arch/ia64/mm/contig.c             |   9
 -rw-r--r--  arch/ia64/mm/init.c               |   9
 -rw-r--r--  arch/ia64/sn/kernel/setup.c       |  12
 -rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c |  15
 -rw-r--r--  arch/ia64/sn/kernel/xpc_main.c    |  64

16 files changed, 358 insertions, 149 deletions
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 1f16ebb9a800..324ea7565e2c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -488,7 +488,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 
 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
 
-static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios)
+static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
         unsigned int cflag = tty->termios->c_cflag;
 
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 8ae384eb5357..098ee605bf5e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
 obj-$(CONFIG_KPROBES)           += kprobes.o jprobes.o
 obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP)        += crash_dump.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
 obj-$(CONFIG_AUDIT)             += audit.o
 obj-$(CONFIG_PCI_MSI)           += msi_ia64.o
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 0aabedf95dad..bc2f64d72244 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -19,29 +19,11 @@
 
 #include <asm/kdebug.h>
 #include <asm/mca.h>
-#include <asm/uaccess.h>
 
 int kdump_status[NR_CPUS];
 atomic_t kdump_cpu_freezed;
 atomic_t kdump_in_progress;
 int kdump_on_init = 1;
-ssize_t
-copy_oldmem_page(unsigned long pfn, char *buf,
-                size_t csize, unsigned long offset, int userbuf)
-{
-        void *vaddr;
-
-        if (!csize)
-                return 0;
-        vaddr = __va(pfn<<PAGE_SHIFT);
-        if (userbuf) {
-                if (copy_to_user(buf, (vaddr + offset), csize)) {
-                        return -EFAULT;
-                }
-        } else
-                memcpy(buf, (vaddr + offset), csize);
-        return csize;
-}
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
@@ -225,14 +207,10 @@ static ctl_table sys_table[] = {
 static int
 machine_crash_setup(void)
 {
-        char *from = strstr(saved_command_line, "elfcorehdr=");
         static struct notifier_block kdump_init_notifier_nb = {
                 .notifier_call = kdump_init_notifier,
         };
         int ret;
-        if (from)
-                elfcorehdr_addr = memparse(from+11, &from);
-        saved_max_pfn = (unsigned long)-1;
         if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
                 return ret;
 #ifdef CONFIG_SYSCTL
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
new file mode 100644
index 000000000000..83b8c91c1408
--- /dev/null
+++ b/arch/ia64/kernel/crash_dump.c
@@ -0,0 +1,48 @@
+/*
+ *      kernel/crash_dump.c - Memory preserving reboot related code.
+ *
+ *      Created by: Simon Horman <horms@verge.net.au>
+ *      Original code moved from kernel/crash.c
+ *      Original code comment copied from the i386 version of this file
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <linux/uaccess.h>
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ *      space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ *      otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
+ */
+ssize_t
+copy_oldmem_page(unsigned long pfn, char *buf,
+                size_t csize, unsigned long offset, int userbuf)
+{
+        void *vaddr;
+
+        if (!csize)
+                return 0;
+        vaddr = __va(pfn<<PAGE_SHIFT);
+        if (userbuf) {
+                if (copy_to_user(buf, (vaddr + offset), csize)) {
+                        return -EFAULT;
+                }
+        } else
+                memcpy(buf, (vaddr + offset), csize);
+        return csize;
+}
+
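For context on how the relocated copy_oldmem_page() gets exercised: the generic /proc/vmcore code walks the crashed kernel's memory one page frame at a time, never letting a single call cross a page boundary. A simplified sketch of that caller follows (the real one lives in fs/proc/vmcore.c; the helper name here is illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/types.h>

extern ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                                size_t csize, unsigned long offset,
                                int userbuf);

/* Sketch of a vmcore-style reader: split the request at page
 * boundaries and hand each piece to copy_oldmem_page(). */
static ssize_t read_from_oldmem_sketch(char *buf, size_t count,
                                       u64 *ppos, int userbuf)
{
        unsigned long pfn = (unsigned long)(*ppos >> PAGE_SHIFT);
        unsigned long offset = (unsigned long)(*ppos & (PAGE_SIZE - 1));
        size_t read = 0;

        while (count) {
                size_t nr = min(count, (size_t)(PAGE_SIZE - offset));
                ssize_t tmp = copy_oldmem_page(pfn, buf, nr, offset, userbuf);

                if (tmp < 0)
                        return tmp;
                *ppos += tmp;
                buf += tmp;
                read += tmp;
                count -= tmp;
                pfn++;          /* next page starts at offset 0 */
                offset = 0;
        }
        return read;
}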
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index 5cd6226f44f2..621630256c4a 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -45,13 +45,14 @@
  * to the correct location.
  */
 #include <asm/asmmacro.h>
+#include <asm/break.h>
 
 /*
  * void jprobe_break(void)
  */
         .section .kprobes.text, "ax"
 ENTRY(jprobe_break)
-        break.m 0x80300
+        break.m __IA64_BREAK_JPROBE
 END(jprobe_break)
 
 /*
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 76e778951e20..6cb56dd4056d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -88,6 +88,7 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 {
         p->ainsn.inst_flag = 0;
         p->ainsn.target_br_reg = 0;
+        p->ainsn.slot = slot;
 
         /* Check for Break instruction
          * Bits 37:40 Major opcode to be zero
@@ -129,48 +130,6 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 
 /*
  * In this function we check to see if the instruction
- * on which we are inserting kprobe is supported.
- * Returns 0 if supported
- * Returns -EINVAL if unsupported
- */
-static int __kprobes unsupported_inst(uint template, uint slot,
-                                      uint major_opcode,
-                                      unsigned long kprobe_inst,
-                                      unsigned long addr)
-{
-        if (bundle_encoding[template][slot] == I) {
-                switch (major_opcode) {
-                case 0x0: //I_UNIT_MISC_OPCODE:
-                        /*
-                         * Check for Integer speculation instruction
-                         * - Bit 33-35 to be equal to 0x1
-                         */
-                        if (((kprobe_inst >> 33) & 0x7) == 1) {
-                                printk(KERN_WARNING
-                                        "Kprobes on speculation inst at <0x%lx> not supported\n",
-                                        addr);
-                                return -EINVAL;
-                        }
-
-                        /*
-                         * IP relative mov instruction
-                         *  - Bit 27-35 to be equal to 0x30
-                         */
-                        if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
-                                printk(KERN_WARNING
-                                        "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
-                                        addr);
-                                return -EINVAL;
-
-                        }
-                }
-        }
-        return 0;
-}
-
-
-/*
- * In this function we check to see if the instruction
  * (qp) cmpx.crel.ctype p1,p2=r2,r3
  * on which we are inserting kprobe is cmp instruction
  * with ctype as unc.
@@ -206,26 +165,136 @@ out:
 }
 
 /*
+ * In this function we check to see if the instruction
+ * on which we are inserting kprobe is supported.
+ * Returns qp value if supported
+ * Returns -EINVAL if unsupported
+ */
+static int __kprobes unsupported_inst(uint template, uint slot,
+                                      uint major_opcode,
+                                      unsigned long kprobe_inst,
+                                      unsigned long addr)
+{
+        int qp;
+
+        qp = kprobe_inst & 0x3f;
+        if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
+                if (slot == 1 && qp) {
+                        printk(KERN_WARNING "Kprobes on cmp unc "
+                                        "instruction on slot 1 at <0x%lx> "
+                                        "is not supported\n", addr);
+                        return -EINVAL;
+
+                }
+                qp = 0;
+        }
+        else if (bundle_encoding[template][slot] == I) {
+                if (major_opcode == 0) {
+                        /*
+                         * Check for Integer speculation instruction
+                         * - Bit 33-35 to be equal to 0x1
+                         */
+                        if (((kprobe_inst >> 33) & 0x7) == 1) {
+                                printk(KERN_WARNING
+                                        "Kprobes on speculation inst at <0x%lx> not supported\n",
+                                        addr);
+                                return -EINVAL;
+                        }
+                        /*
+                         * IP relative mov instruction
+                         * - Bit 27-35 to be equal to 0x30
+                         */
+                        if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
+                                printk(KERN_WARNING
+                                        "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
+                                        addr);
+                                return -EINVAL;
+
+                        }
+                }
+                else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
+                                (kprobe_inst & (0x1UL << 12))) {
+                        /* test bit instructions, tbit,tnat,tf
+                         * bit 33-36 to be equal to 0
+                         * bit 12 to be equal to 1
+                         */
+                        if (slot == 1 && qp) {
+                                printk(KERN_WARNING "Kprobes on test bit "
+                                                "instruction on slot at <0x%lx> "
+                                                "is not supported\n", addr);
+                                return -EINVAL;
+                        }
+                        qp = 0;
+                }
+        }
+        else if (bundle_encoding[template][slot] == B) {
+                if (major_opcode == 7) {
+                        /* IP-Relative Predict major code is 7 */
+                        printk(KERN_WARNING "Kprobes on IP-Relative "
+                                        "Predict is not supported\n");
+                        return -EINVAL;
+                }
+                else if (major_opcode == 2) {
+                        /* Indirect Predict, major code is 2
+                         * bit 27-32 to be equal to 10 or 11
+                         */
+                        int x6 = (kprobe_inst >> 27) & 0x3F;
+                        if ((x6 == 0x10) || (x6 == 0x11)) {
+                                printk(KERN_WARNING "Kprobes on "
+                                                "Indirect Predict is not supported\n");
+                                return -EINVAL;
+                        }
+                }
+        }
+        /* kernel does not use float instructions, here for safety kprobe
+         * will judge whether it is fcmp/fclass/float approximation instruction
+         */
+        else if (unlikely(bundle_encoding[template][slot] == F)) {
+                if ((major_opcode == 4 || major_opcode == 5) &&
+                                (kprobe_inst & (0x1 << 12))) {
+                        /* fcmp/fclass unc instruction */
+                        if (slot == 1 && qp) {
+                                printk(KERN_WARNING "Kprobes on fcmp/fclass "
+                                        "instruction on slot at <0x%lx> "
+                                        "is not supported\n", addr);
+                                return -EINVAL;
+
+                        }
+                        qp = 0;
+                }
+                if ((major_opcode == 0 || major_opcode == 1) &&
+                                (kprobe_inst & (0x1UL << 33))) {
+                        /* float Approximation instruction */
+                        if (slot == 1 && qp) {
+                                printk(KERN_WARNING "Kprobes on float Approx "
+                                        "instr at <0x%lx> is not supported\n",
+                                        addr);
+                                return -EINVAL;
+                        }
+                        qp = 0;
+                }
+        }
+        return qp;
+}
+
+/*
  * In this function we override the bundle with
  * the break instruction at the given slot.
  */
 static void __kprobes prepare_break_inst(uint template, uint slot,
                                          uint major_opcode,
                                          unsigned long kprobe_inst,
-                                         struct kprobe *p)
+                                         struct kprobe *p,
+                                         int qp)
 {
         unsigned long break_inst = BREAK_INST;
         bundle_t *bundle = &p->opcode.bundle;
 
         /*
          * Copy the original kprobe_inst qualifying predicate(qp)
-         * to the break instruction iff !is_cmp_ctype_unc_inst
-         * because for cmp instruction with ctype equal to unc,
-         * which is a special instruction always needs to be
-         * executed regradless of qp
+         * to the break instruction
          */
-        if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
-                break_inst |= (0x3f & kprobe_inst);
+        break_inst |= qp;
 
         switch (slot) {
         case 0:
@@ -296,12 +365,6 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
                 return -EINVAL;
         }
 
-        if (slot == 1 && bundle_encoding[template][1] != L) {
-                printk(KERN_WARNING "Inserting kprobes on slot #1 "
-                       "is not supported\n");
-                return -EINVAL;
-        }
-
         return 0;
 }
 
@@ -427,6 +490,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
         unsigned long kprobe_inst=0;
         unsigned int slot = addr & 0xf, template, major_opcode = 0;
         bundle_t *bundle;
+        int qp;
 
         bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
         template = bundle->quad0.template;
@@ -441,9 +505,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
         /* Get kprobe_inst and major_opcode from the bundle */
         get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
 
-        if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
-                return -EINVAL;
-
+        qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
+        if (qp < 0)
+                return -EINVAL;
 
         p->ainsn.insn = get_insn_slot();
         if (!p->ainsn.insn)
@@ -451,30 +515,56 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
         memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
         memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
 
-        prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
+        prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
         return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-        unsigned long addr = (unsigned long)p->addr;
-        unsigned long arm_addr = addr & ~0xFULL;
+        unsigned long arm_addr;
+        bundle_t *src, *dest;
+
+        arm_addr = ((unsigned long)p->addr) & ~0xFUL;
+        dest = &((kprobe_opcode_t *)arm_addr)->bundle;
+        src = &p->opcode.bundle;
 
         flush_icache_range((unsigned long)p->ainsn.insn,
                 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
-        memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
+        switch (p->ainsn.slot) {
+        case 0:
+                dest->quad0.slot0 = src->quad0.slot0;
+                break;
+        case 1:
+                dest->quad1.slot1_p1 = src->quad1.slot1_p1;
+                break;
+        case 2:
+                dest->quad1.slot2 = src->quad1.slot2;
+                break;
+        }
         flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-        unsigned long addr = (unsigned long)p->addr;
-        unsigned long arm_addr = addr & ~0xFULL;
+        unsigned long arm_addr;
+        bundle_t *src, *dest;
 
+        arm_addr = ((unsigned long)p->addr) & ~0xFUL;
+        dest = &((kprobe_opcode_t *)arm_addr)->bundle;
         /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
-        memcpy((char *) arm_addr, (char *) p->ainsn.insn,
-                        sizeof(kprobe_opcode_t));
+        src = &p->ainsn.insn->bundle;
+        switch (p->ainsn.slot) {
+        case 0:
+                dest->quad0.slot0 = src->quad0.slot0;
+                break;
+        case 1:
+                dest->quad1.slot1_p1 = src->quad1.slot1_p1;
+                break;
+        case 2:
+                dest->quad1.slot2 = src->quad1.slot2;
+                break;
+        }
         flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
@@ -807,7 +897,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
         switch(val) {
         case DIE_BREAK:
                 /* err is break number from ia64_bad_break() */
-                if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0)
+                if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
+                        || args->err == __IA64_BREAK_JPROBE
+                        || args->err == 0)
                         if (pre_kprobes_handler(args))
                                 ret = NOTIFY_STOP;
                 break;
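A note on the qp handling above: every IA-64 instruction carries a 6-bit qualifying predicate in bits 0..5. unsupported_inst() now returns that predicate (or 0 for instructions such as cmp.unc that must execute unconditionally), and prepare_break_inst() simply ORs it into the break instruction so the break fires under the same predicate as the probed instruction. A minimal userspace sketch of the bit manipulation; the break encoding below is a stand-in, not the kernel's actual BREAK_INST value:

#include <stdio.h>

#define QP_MASK         0x3fUL          /* bits 0..5: qualifying predicate */
#define BREAK_SKELETON  0x2000000000UL  /* hypothetical break encoding */

int main(void)
{
        unsigned long kprobe_inst = 0x10aabbccdd07UL;   /* example bits */
        unsigned long qp = kprobe_inst & QP_MASK;       /* here: p7 */
        unsigned long break_inst = BREAK_SKELETON | qp; /* inherit predicate */

        printf("qp=p%lu break=%#lx\n", qp, break_inst);
        return 0;
}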
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 468233fa2cee..e2ccc9f660c5 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -19,8 +19,11 @@
 #include <asm/delay.h>
 #include <asm/meminit.h>
 
-typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
-                struct ia64_boot_param *, unsigned long);
+typedef NORET_TYPE void (*relocate_new_kernel_t)(
+                                        unsigned long indirection_page,
+                                        unsigned long start_address,
+                                        struct ia64_boot_param *boot_param,
+                                        unsigned long pal_addr) ATTRIB_NORET;
 
 struct kimage *ia64_kimage;
 
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 87c1c4f42872..a76add3e76a2 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1239,7 +1239,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
         } else {
                 /* Dump buffered message to console */
                 ia64_mlogbuf_finish(1);
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_KEXEC
                 atomic_set(&kdump_in_progress, 1);
                 monarch_cpu = -1;
 #endif
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 14e1200376a9..ad567b8d432e 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -256,7 +256,7 @@ reserve_memory (void)
 
 #ifdef CONFIG_KEXEC
         /* crashkernel=size@offset specifies the size to reserve for a crash
-         * kernel.(offset is ingored for keep compatibility with other archs)
+         * kernel. If offset is 0, then it is determined automatically.
          * By reserving this memory we guarantee that linux never set's it
          * up as a DMA target.Useful for holding code to do something
          * appropriate after a kernel panic.
@@ -266,10 +266,16 @@ reserve_memory (void)
                 unsigned long base, size;
                 if (from) {
                         size = memparse(from + 12, &from);
+                        if (*from == '@')
+                                base = memparse(from+1, &from);
+                        else
+                                base = 0;
                         if (size) {
-                                sort_regions(rsvd_region, n);
-                                base = kdump_find_rsvd_region(size,
-                                rsvd_region, n);
+                                if (!base) {
+                                        sort_regions(rsvd_region, n);
+                                        base = kdump_find_rsvd_region(size,
+                                                        rsvd_region, n);
+                                }
                                 if (base != ~0UL) {
                                         rsvd_region[n].start =
                                                 (unsigned long)__va(base);
@@ -434,6 +440,21 @@ static __init int setup_nomca(char *s)
 }
 early_param("nomca", setup_nomca);
 
+#ifdef CONFIG_PROC_VMCORE
+/* elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel.
+ */
+static int __init parse_elfcorehdr(char *arg)
+{
+        if (!arg)
+                return -EINVAL;
+
+        elfcorehdr_addr = memparse(arg, &arg);
+        return 0;
+}
+early_param("elfcorehdr", parse_elfcorehdr);
+#endif /* CONFIG_PROC_VMCORE */
+
 void __init
 setup_arch (char **cmdline_p)
 {
@@ -653,6 +674,7 @@ get_model_name(__u8 family, __u8 model)
 {
         char brand[128];
 
+        memcpy(brand, "Unknown", 8);
         if (ia64_pal_get_brand_info(brand)) {
                 if (family == 0x7)
                         memcpy(brand, "Merced", 7);
@@ -660,8 +682,7 @@ get_model_name(__u8 family, __u8 model)
                 case 0: memcpy(brand, "McKinley", 9); break;
                 case 1: memcpy(brand, "Madison", 8); break;
                 case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
-                } else
-                        memcpy(brand, "Unknown", 8);
+                }
         }
         if (brandname[0] == '\0')
                 return strcpy(brandname, brand);
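The crashkernel= change means a given base is now honored, while a missing or zero base falls back to auto-selection via kdump_find_rsvd_region(). A small userspace sketch of the size@offset parsing, with strtoull standing in for the kernel's memparse() (which also understands K/M/G suffixes):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *cmdline = "crashkernel=256M@0";   /* example command line */
        const char *from = strstr(cmdline, "crashkernel=");
        unsigned long long size = 0, base = 0;
        char *end;

        if (from) {
                size = strtoull(from + 12, &end, 0);
                if (*end == 'M') {                    /* crude suffix handling */
                        size <<= 20;
                        end++;
                }
                if (*end == '@')
                        base = strtoull(end + 1, &end, 0);
        }
        /* base == 0 now means "pick a reserved region automatically" */
        printf("size=%llu base=%llu%s\n", size, base, base ? "" : " (auto)");
        return 0;
}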
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b1b9aa4364b9..f4c7f7769cf7 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -157,7 +157,7 @@ handle_IPI (int irq, void *dev_id)
               case IPI_CPU_STOP:
                 stop_this_cpu();
                 break;
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_KEXEC
               case IPI_KDUMP_CPU_STOP:
                 unw_init_running(kdump_cpu_freeze, NULL);
                 break;
@@ -219,7 +219,7 @@ send_IPI_self (int op)
         send_IPI_single(smp_processor_id(), op);
 }
 
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_KEXEC
 void
 kdump_smp_send_stop()
 {
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index fffa9e0826bc..ab684747036f 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -307,6 +307,15 @@ fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long
         return ret.status;
 }
 
+struct fpu_swa_msg {
+        unsigned long count;
+        unsigned long time;
+};
+static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
+DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
+static struct fpu_swa_msg last __cacheline_aligned;
+
+
 /*
  * Handle floating-point assist faults and traps.
  */
@@ -316,8 +325,6 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
         long exception, bundle[2];
         unsigned long fault_ip;
         struct siginfo siginfo;
-        static int fpu_swa_count = 0;
-        static unsigned long last_time;
 
         fault_ip = regs->cr_iip;
         if (!fp_fault && (ia64_psr(regs)->ri == 0))
@@ -325,14 +332,37 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
         if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
                 return -1;
 
-        if (jiffies - last_time > 5*HZ)
-                fpu_swa_count = 0;
-        if ((fpu_swa_count < 4) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
-                last_time = jiffies;
-                ++fpu_swa_count;
-                printk(KERN_WARNING
-                        "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
-                        current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
+        if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
+                unsigned long count, current_jiffies = jiffies;
+                struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);
+
+                if (unlikely(current_jiffies > cp->time))
+                        cp->count = 0;
+                if (unlikely(cp->count < 5)) {
+                        cp->count++;
+                        cp->time = current_jiffies + 5 * HZ;
+
+                        /* minimize races by grabbing a copy of count BEFORE checking last.time. */
+                        count = last.count;
+                        barrier();
+
+                        /*
+                         * Lower 4 bits are used as a count. Upper bits are a sequence
+                         * number that is updated when count is reset. The cmpxchg will
+                         * fail if seqno has changed. This minimizes multiple cpus
+                         * resetting the count.
+                         */
+                        if (current_jiffies > last.time)
+                                (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
+
+                        /* used fetchadd to atomically update the count */
+                        if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
+                                last.time = current_jiffies + 5 * HZ;
+                                printk(KERN_WARNING
+                                        "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
+                                        current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
+                        }
+                }
         }
 
         exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
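The traps.c change replaces a single static counter with a two-level limiter: a cheap per-cpu gate in front of a shared structure, where the shared count lives in the low 4 bits and a sequence number sits in the bits above, so a stale reset loses the cmpxchg race instead of clobbering a newer window. A rough C11 sketch of that shared-counter logic; illustration only, the kernel uses cmpxchg_acq and ia64_fetchadd:

#include <stdatomic.h>

#define COUNT_MASK 15UL

static _Atomic unsigned long last_count;
static unsigned long last_time;

static int may_print(unsigned long now, unsigned long window)
{
        unsigned long count = atomic_load(&last_count);

        if (now > last_time) {
                /* bump the sequence bits and zero the count; losing the
                 * compare-exchange just means another cpu reset first */
                unsigned long reset = (count & ~COUNT_MASK) + COUNT_MASK + 1;
                atomic_compare_exchange_strong(&last_count, &count, reset);
        }
        if ((atomic_load(&last_count) & COUNT_MASK) < 5 &&
            (atomic_fetch_add(&last_count, 1) & COUNT_MASK) < 5) {
                last_time = now + window;       /* extend the quiet window */
                return 1;                       /* caller may print */
        }
        return 0;
}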
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 82deaa3a7c48..1e79551231b9 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -174,6 +174,12 @@ find_memory (void)
         reserve_bootmem(bootmap_start, bootmap_size);
 
         find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+        /* If we are doing a crash dump, we still need to know the real mem
+         * size before the original memory map is reset. */
+        saved_max_pfn = max_pfn;
+#endif
 }
 
 #ifdef CONFIG_SMP
@@ -226,7 +232,6 @@ void __init
 paging_init (void)
 {
         unsigned long max_dma;
-        unsigned long nid = 0;
         unsigned long max_zone_pfns[MAX_NR_ZONES];
 
         num_physpages = 0;
@@ -238,7 +243,7 @@ paging_init (void)
         max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-        efi_memmap_walk(register_active_ranges, &nid);
+        efi_memmap_walk(register_active_ranges, NULL);
         efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
         if (max_gap < LARGE_GAP) {
                 vmem_map = (struct page *) 0;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 56dc2024220e..1a3d8a2feb94 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -595,14 +595,9 @@ find_largest_hole (u64 start, u64 end, void *arg)
 }
 
 int __init
-register_active_ranges(u64 start, u64 end, void *nid)
+register_active_ranges(u64 start, u64 end, void *arg)
 {
-        BUG_ON(nid == NULL);
-        BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
-
-        add_active_range(*(unsigned long *)nid,
-                __pa(start) >> PAGE_SHIFT,
-                __pa(end) >> PAGE_SHIFT);
+        add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
         return 0;
 }
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a934ad069425..8571e52c2efd 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -580,7 +580,7 @@ void __cpuinit sn_cpu_init(void)
         int slice;
         int cnode;
         int i;
-        static int wars_have_been_checked;
+        static int wars_have_been_checked, set_cpu0_number;
 
         cpuid = smp_processor_id();
         if (cpuid == 0 && IS_MEDUSA()) {
@@ -605,8 +605,16 @@ void __cpuinit sn_cpu_init(void)
         /*
          * Don't check status. The SAL call is not supported on all PROMs
          * but a failure is harmless.
+         * Architecturally, cpu_init is always called twice on cpu 0. We
+         * should set cpu_number on cpu 0 once.
          */
-        (void) ia64_sn_set_cpu_number(cpuid);
+        if (cpuid == 0) {
+                if (!set_cpu0_number) {
+                        (void) ia64_sn_set_cpu_number(cpuid);
+                        set_cpu0_number = 1;
+                }
+        } else
+                (void) ia64_sn_set_cpu_number(cpuid);
 
         /*
          * The boot cpu makes this call again after platform initialization is
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 1f3540826e68..c08db9c2375d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -632,7 +632,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
                         ch->number, ch->partid);
 
                 spin_unlock_irqrestore(&ch->lock, *irq_flags);
-                xpc_create_kthreads(ch, 1);
+                xpc_create_kthreads(ch, 1, 0);
                 spin_lock_irqsave(&ch->lock, *irq_flags);
         }
 
@@ -754,12 +754,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
         /* make sure all activity has settled down first */
 
-        if (atomic_read(&ch->references) > 0 ||
-                        ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
+        if (atomic_read(&ch->kthreads_assigned) > 0 ||
+                        atomic_read(&ch->references) > 0) {
                 return;
         }
-        DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+        DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
         if (part->act_state == XPC_P_DEACTIVATING) {
                 /* can't proceed until the other side disengages from us */
@@ -1651,6 +1651,11 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
         /* wake all idle kthreads so they can exit */
         if (atomic_read(&ch->kthreads_idle) > 0) {
                 wake_up_all(&ch->idle_wq);
+
+        } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+                /* start a kthread that will do the xpcDisconnecting callout */
+                xpc_create_kthreads(ch, 1, 1);
         }
 
         /* wake those waiting to allocate an entry from the local msg queue */
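The disconnect path above follows a wake-or-spawn pattern: prefer waking an idle kthread, and only create one (with ignore_disconnecting set) when the mandatory xpcDisconnecting callout still has to run and nobody is left to make it. Restated as a sketch, with a hypothetical helper name and the XPC types assumed:

/* Sketch of the wake-or-spawn decision: reuse an idle kthread when one
 * exists; otherwise spawn a single thread whose only job is the
 * xpcDisconnecting callout. */
static void kick_disconnect_callout(struct xpc_channel *ch)
{
        if (atomic_read(&ch->kthreads_idle) > 0)
                wake_up_all(&ch->idle_wq);
        else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
                 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT))
                xpc_create_kthreads(ch, 1, 1);  /* ignore_disconnecting */
}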
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index fa96dfc0e1aa..7a387d237363 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -681,7 +681,7 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
         dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
                 needed, ch->partid, ch->number);
 
-        xpc_create_kthreads(ch, needed);
+        xpc_create_kthreads(ch, needed, 0);
 }
 
 
@@ -775,26 +775,28 @@ xpc_daemonize_kthread(void *args)
                 xpc_kthread_waitmsgs(part, ch);
         }
 
-        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
-                spin_lock_irqsave(&ch->lock, irq_flags);
-                if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                                !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
-                        ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
-                        spin_unlock_irqrestore(&ch->lock, irq_flags);
-
-                        xpc_disconnect_callout(ch, xpcDisconnecting);
-
-                        spin_lock_irqsave(&ch->lock, irq_flags);
-                        ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
-                }
-                spin_unlock_irqrestore(&ch->lock, irq_flags);
+        /* let registerer know that connection is disconnecting */
+
+        spin_lock_irqsave(&ch->lock, irq_flags);
+        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
+                spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+                xpc_disconnect_callout(ch, xpcDisconnecting);
+
+                spin_lock_irqsave(&ch->lock, irq_flags);
+                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
+        }
+        spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
                 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
                         xpc_mark_partition_disengaged(part);
                         xpc_IPI_send_disengage(part);
                 }
         }
 
-
         xpc_msgqueue_deref(ch);
 
         dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
@@ -818,7 +820,8 @@ xpc_daemonize_kthread(void *args)
  * partition.
  */
 void
-xpc_create_kthreads(struct xpc_channel *ch, int needed)
+xpc_create_kthreads(struct xpc_channel *ch, int needed,
+                        int ignore_disconnecting)
 {
         unsigned long irq_flags;
         pid_t pid;
@@ -833,16 +836,38 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
                  * kthread. That kthread is responsible for doing the
                  * counterpart to the following before it exits.
                  */
+                if (ignore_disconnecting) {
+                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
+                                /* kthreads assigned had gone to zero */
+                                BUG_ON(!(ch->flags &
+                                        XPC_C_DISCONNECTINGCALLOUT_MADE));
+                                break;
+                        }
+
+                } else if (ch->flags & XPC_C_DISCONNECTING) {
+                        break;
+
+                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
+                        if (atomic_inc_return(&part->nchannels_engaged) == 1)
+                                xpc_mark_partition_engaged(part);
+                }
                 (void) xpc_part_ref(part);
                 xpc_msgqueue_ref(ch);
-                if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
-                    atomic_inc_return(&part->nchannels_engaged) == 1) {
-                        xpc_mark_partition_engaged(part);
-                }
 
                 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
                 if (pid < 0) {
                         /* the fork failed */
+
+                        /*
+                         * NOTE: if (ignore_disconnecting &&
+                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
+                         * then we'll deadlock if all other kthreads assigned
+                         * to this channel are blocked in the channel's
+                         * registerer, because the only thing that will unblock
+                         * them is the xpcDisconnecting callout that this
+                         * failed kernel_thread would have made.
+                         */
+
                         if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                             atomic_dec_return(&part->nchannels_engaged) == 0) {
                                 xpc_mark_partition_disengaged(part);
@@ -857,9 +882,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
                          * Flag this as an error only if we have an
                          * insufficient #of kthreads for the channel
                          * to function.
-                         *
-                         * No xpc_msgqueue_ref() is needed here since
-                         * the channel mgr is doing this.
                          */
                         spin_lock_irqsave(&ch->lock, irq_flags);
                         XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
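The ignore_disconnecting path hinges on atomic_inc_not_zero(): take a kthread reference only if at least one is still assigned; if the count already reached zero, teardown has run and spawning would race with it. A rough C11 equivalent of that primitive, for reference:

#include <stdatomic.h>

/* Increment *v unless it is zero; returns 1 if the increment happened.
 * The compare-exchange loop reloads the observed value on failure. */
static int inc_not_zero(_Atomic int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return 1;       /* reference taken */
        }
        return 0;                       /* was zero: too late to join */
}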