Diffstat (limited to 'arch')
-rw-r--r--	arch/ia64/Kconfig		|   8
-rw-r--r--	arch/ia64/kernel/Makefile	|   5
-rw-r--r--	arch/ia64/kernel/entry.S	|   4
-rw-r--r--	arch/ia64/kernel/esi.c		| 205
-rw-r--r--	arch/ia64/kernel/esi_stub.S	|  96
-rw-r--r--	arch/ia64/kernel/ia64_ksyms.c	|   4
-rw-r--r--	arch/ia64/kernel/kprobes.c	|  61
-rw-r--r--	arch/ia64/kernel/mca.c		| 234
-rw-r--r--	arch/ia64/kernel/mca_asm.S	|   9
-rw-r--r--	arch/ia64/kernel/mca_drv.c	|  54
-rw-r--r--	arch/ia64/kernel/mca_drv.h	|   4
-rw-r--r--	arch/ia64/kernel/perfmon.c	| 113
-rw-r--r--	arch/ia64/kernel/salinfo.c	|   4
-rw-r--r--	arch/ia64/kernel/setup.c	|  41
-rw-r--r--	arch/ia64/kernel/smpboot.c	|  24
-rw-r--r--	arch/ia64/kernel/vmlinux.lds.S	|   8
-rw-r--r--	arch/ia64/mm/contig.c		|  17
-rw-r--r--	arch/ia64/mm/discontig.c	|  28
-rw-r--r--	arch/ia64/sn/kernel/bte.c	|   3
19 files changed, 745 insertions(+), 177 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index d5ee4fc8fe66..0b7f701d5cf7 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -423,6 +423,14 @@ config IA64_PALINFO
 config SGI_SN
 	def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
 
+config IA64_ESI
+	bool "ESI (Extensible SAL Interface) support"
+	help
+	  If you say Y here, support is built into the kernel to
+	  make ESI calls.  ESI calls are used to support vendor-specific
+	  firmware extensions, such as the ability to inject memory errors
+	  for test purposes.  If you're unsure, say N.
+
 source "drivers/sn/Kconfig"
 
 source "drivers/firmware/Kconfig"
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index ad8215a3c586..31497496eb4b 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -32,6 +32,11 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 
+obj-$(CONFIG_IA64_ESI)		+= esi.o
+ifneq ($(CONFIG_IA64_ESI),)
+obj-y				+= esi_stub.o	# must be in kernel proper
+endif
+
 # The gate DSO image is built using a special linker script.
 targets += gate.so gate-syms.o
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index fef06571be99..12701cf32d99 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1605,8 +1605,8 @@ sys_call_table:
 	data8 sys_ni_syscall			// 1295 reserved for ppoll
 	data8 sys_unshare
 	data8 sys_splice
-	data8 sys_ni_syscall			// reserved for set_robust_list
-	data8 sys_ni_syscall			// reserved for get_robust_list
+	data8 sys_set_robust_list
+	data8 sys_get_robust_list
 	data8 sys_sync_file_range		// 1300
 	data8 sys_tee
 	data8 sys_vmsplice
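
The two entries above replace placeholders reserved when the robust-futex syscalls were merged on other architectures; counting back from sys_sync_file_range at 1300, set_robust_list and get_robust_list land at 1298 and 1299 on ia64. A minimal user-space sketch of exercising the new entry (the wrapper, constant name, and error handling here are illustrative, not part of the patch):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>	/* struct robust_list_head */

	#define NR_set_robust_list_ia64 1298	/* from the table above */

	static struct robust_list_head head;

	int main(void)
	{
		head.list.next = &head.list;	/* empty circular list */
		head.futex_offset = 0;
		head.list_op_pending = NULL;

		/* register the list the kernel walks on process exit */
		if (syscall(NR_set_robust_list_ia64, &head, sizeof(head)) != 0)
			perror("set_robust_list");
		return 0;
	}
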
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
new file mode 100644
index 000000000000..ebf4e988e78c
--- /dev/null
+++ b/arch/ia64/kernel/esi.c
@@ -0,0 +1,205 @@
+/*
+ * Extensible SAL Interface (ESI) support routines.
+ *
+ * Copyright (C) 2006 Hewlett-Packard Co
+ *	Alex Williamson <alex.williamson@hp.com>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/esi.h>
+#include <asm/sal.h>
+
+MODULE_AUTHOR("Alex Williamson <alex.williamson@hp.com>");
+MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support");
+MODULE_LICENSE("GPL");
+
+#define MODULE_NAME	"esi"
+
+#define ESI_TABLE_GUID					\
+    EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3,		\
+	     0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)
+
+enum esi_systab_entry_type {
+	ESI_DESC_ENTRY_POINT = 0
+};
+
+/*
+ * Entry type:	Size:
+ *	0	48
+ */
+#define ESI_DESC_SIZE(type)	"\060"[(unsigned) (type)]
+
+typedef struct ia64_esi_desc_entry_point {
+	u8 type;
+	u8 reserved1[15];
+	u64 esi_proc;
+	u64 gp;
+	efi_guid_t guid;
+} ia64_esi_desc_entry_point_t;
+
+struct pdesc {
+	void *addr;
+	void *gp;
+};
+
+static struct ia64_sal_systab *esi_systab;
+
+static int __init esi_init (void)
+{
+	efi_config_table_t *config_tables;
+	struct ia64_sal_systab *systab;
+	unsigned long esi = 0;
+	char *p;
+	int i;
+
+	config_tables = __va(efi.systab->tables);
+
+	for (i = 0; i < (int) efi.systab->nr_tables; ++i) {
+		if (efi_guidcmp(config_tables[i].guid, ESI_TABLE_GUID) == 0) {
+			esi = config_tables[i].table;
+			break;
+		}
+	}
+
+	if (!esi)
+		return -ENODEV;
+
+	systab = __va(esi);
+
+	if (strncmp(systab->signature, "ESIT", 4) != 0) {
+		printk(KERN_ERR "bad signature in ESI system table!\n");
+		return -ENODEV;
+	}
+
+	p = (char *) (systab + 1);
+	for (i = 0; i < systab->entry_count; i++) {
+		/*
+		 * The first byte of each entry type contains the type
+		 * descriptor.
+		 */
+		switch (*p) {
+		case ESI_DESC_ENTRY_POINT:
+			break;
+		default:
+			printk(KERN_WARNING "Unknown table type %d found in "
+			       "ESI table, ignoring rest of table\n", *p);
+			return -ENODEV;
+		}
+
+		p += ESI_DESC_SIZE(*p);
+	}
+
+	esi_systab = systab;
+	return 0;
+}
+
+
+int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp,
+		   enum esi_proc_type proc_type, u64 func,
+		   u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
+		   u64 arg7)
+{
+	struct ia64_fpreg fr[6];
+	unsigned long flags = 0;
+	int i;
+	char *p;
+
+	if (!esi_systab)
+		return -1;
+
+	p = (char *) (esi_systab + 1);
+	for (i = 0; i < esi_systab->entry_count; i++) {
+		if (*p == ESI_DESC_ENTRY_POINT) {
+			ia64_esi_desc_entry_point_t *esi = (void *)p;
+			if (!efi_guidcmp(guid, esi->guid)) {
+				ia64_sal_handler esi_proc;
+				struct pdesc pdesc;
+
+				pdesc.addr = __va(esi->esi_proc);
+				pdesc.gp = __va(esi->gp);
+
+				esi_proc = (ia64_sal_handler) &pdesc;
+
+				ia64_save_scratch_fpregs(fr);
+				if (proc_type == ESI_PROC_SERIALIZED)
+					spin_lock_irqsave(&sal_lock, flags);
+				else if (proc_type == ESI_PROC_MP_SAFE)
+					local_irq_save(flags);
+				else
+					preempt_disable();
+				*isrvp = (*esi_proc)(func, arg1, arg2, arg3,
+						     arg4, arg5, arg6, arg7);
+				if (proc_type == ESI_PROC_SERIALIZED)
+					spin_unlock_irqrestore(&sal_lock,
+							       flags);
+				else if (proc_type == ESI_PROC_MP_SAFE)
+					local_irq_restore(flags);
+				else
+					preempt_enable();
+				ia64_load_scratch_fpregs(fr);
+				return 0;
+			}
+		}
+		p += ESI_DESC_SIZE(*p);
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(ia64_esi_call);
+
+int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp,
+			u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
+			u64 arg5, u64 arg6, u64 arg7)
+{
+	struct ia64_fpreg fr[6];
+	unsigned long flags;
+	u64 esi_params[8];
+	char *p;
+	int i;
+
+	if (!esi_systab)
+		return -1;
+
+	p = (char *) (esi_systab + 1);
+	for (i = 0; i < esi_systab->entry_count; i++) {
+		if (*p == ESI_DESC_ENTRY_POINT) {
+			ia64_esi_desc_entry_point_t *esi = (void *)p;
+			if (!efi_guidcmp(guid, esi->guid)) {
+				ia64_sal_handler esi_proc;
+				struct pdesc pdesc;
+
+				pdesc.addr = (void *)esi->esi_proc;
+				pdesc.gp = (void *)esi->gp;
+
+				esi_proc = (ia64_sal_handler) &pdesc;
+
+				esi_params[0] = func;
+				esi_params[1] = arg1;
+				esi_params[2] = arg2;
+				esi_params[3] = arg3;
+				esi_params[4] = arg4;
+				esi_params[5] = arg5;
+				esi_params[6] = arg6;
+				esi_params[7] = arg7;
+				ia64_save_scratch_fpregs(fr);
+				spin_lock_irqsave(&sal_lock, flags);
+				*isrvp = esi_call_phys(esi_proc, esi_params);
+				spin_unlock_irqrestore(&sal_lock, flags);
+				ia64_load_scratch_fpregs(fr);
+				return 0;
+			}
+		}
+		p += ESI_DESC_SIZE(*p);
+	}
+	return -1;
+}
+EXPORT_SYMBOL_GPL(ia64_esi_call_phys);
+
+static void __exit esi_exit (void)
+{
+}
+
+module_init(esi_init);
+module_exit(esi_exit);	/* makes module removable... */
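
For a sense of how a vendor driver consumes the interface added above: the caller passes the GUID of its firmware extension plus one of the three locking disciplines, and ia64_esi_call() returns non-zero when no entry point with that GUID exists in the ESI system table. A hypothetical sketch (the GUID and function number are invented for illustration; real values come from the vendor's ESI documentation):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <asm/esi.h>
	#include <asm/sal.h>

	/* Invented GUID; a real one names a specific vendor extension. */
	#define VENDOR_ESI_GUID					\
	    EFI_GUID(0x12345678, 0xabcd, 0xef01, 0x00,		\
		     0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)

	static int __init vendor_esi_init(void)
	{
		struct ia64_sal_retval isrv;

		/* func 0 with no arguments; the meaning is vendor-defined */
		if (ia64_esi_call(VENDOR_ESI_GUID, &isrv, ESI_PROC_SERIALIZED,
				  0, 0, 0, 0, 0, 0, 0, 0) != 0)
			return -ENODEV;	/* no entry point for this GUID */

		printk(KERN_INFO "vendor ESI call returned status %ld\n",
		       isrv.status);
		return isrv.status < 0 ? -EIO : 0;
	}
	module_init(vendor_esi_init);
	MODULE_LICENSE("GPL");
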
diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S
new file mode 100644
index 000000000000..6b3d6c1f99b6
--- /dev/null
+++ b/arch/ia64/kernel/esi_stub.S
@@ -0,0 +1,96 @@
+/*
+ * ESI call stub.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ *	Alex Williamson <alex.williamson@hp.com>
+ *
+ * Based on EFI call stub by David Mosberger. The stub is virtually
+ * identical to the one for EFI phys-mode calls, except that ESI
+ * calls may have up to 8 arguments, so they get passed to this routine
+ * through memory.
+ *
+ * This stub allows us to make ESI calls in physical mode with interrupts
+ * turned off. ESI calls may not support calling from virtual mode.
+ *
+ * Google for "Extensible SAL specification" for a document describing the
+ * ESI standard.
+ */
+
+/*
+ * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
+ * Abstraction Layer Specification", revision 2.6e).  Note that
+ * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
+ * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
+ * (the br.ia instruction fails unless psr.dfl and psr.dfh are
+ * cleared).  Fortunately, SAL promises not to touch the floating
+ * point regs, so at least we don't have to save f2-f127.
+ */
+#define PSR_BITS_TO_CLEAR						\
+	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |		\
+	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
+	 IA64_PSR_DFL | IA64_PSR_DFH)
+
+#define PSR_BITS_TO_SET							\
+	(IA64_PSR_BN)
+
+#include <asm/processor.h>
+#include <asm/asmmacro.h>
+
+/*
+ * Inputs:
+ *	in0 = address of function descriptor of ESI routine to call
+ *	in1 = address of array of ESI parameters
+ *
+ * Outputs:
+ *	r8 = result returned by called function
+ */
+GLOBAL_ENTRY(esi_call_phys)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+	alloc loc1=ar.pfs,2,7,8,0
+	ld8 r2=[in0],8			// load ESI function's entry point
+	mov loc0=rp
+	.body
+	;;
+	ld8 out0=[in1],8		// ESI params loaded from array
+	;;				// passing all as inputs doesn't work
+	ld8 out1=[in1],8
+	;;
+	ld8 out2=[in1],8
+	;;
+	ld8 out3=[in1],8
+	;;
+	ld8 out4=[in1],8
+	;;
+	ld8 out5=[in1],8
+	;;
+	ld8 out6=[in1],8
+	;;
+	ld8 out7=[in1]
+	mov loc2=gp			// save global pointer
+	mov loc4=ar.rsc			// save RSE configuration
+	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
+	;;
+	ld8 gp=[in0]			// load ESI function's global pointer
+	movl r16=PSR_BITS_TO_CLEAR
+	mov loc3=psr			// save processor status word
+	movl r17=PSR_BITS_TO_SET
+	;;
+	or loc3=loc3,r17
+	mov b6=r2
+	;;
+	andcm r16=loc3,r16		// get psr with IT, DT, and RT bits cleared
+	br.call.sptk.many rp=ia64_switch_mode_phys
+.ret0:	mov loc5=r19			// old ar.bsp
+	mov loc6=r20			// old sp
+	br.call.sptk.many rp=b6		// call the ESI function
+.ret1:	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
+	mov r16=loc3			// save virtual mode psr
+	mov r19=loc5			// save virtual mode bspstore
+	mov r20=loc6			// save virtual mode sp
+	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+.ret2:	mov ar.rsc=loc4			// restore RSE configuration
+	mov ar.pfs=loc1
+	mov rp=loc0
+	mov gp=loc2
+	br.ret.sptk.many rp
END(esi_call_phys)
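
The reason esi.c can cast a struct pdesc to an ia64_sal_handler is the ia64 calling convention: an indirect function pointer is really the address of a two-word function descriptor, and the first ld8 of in0 above consumes the entry point while the later ld8 consumes the global pointer. A sketch of the layout the stub expects (it mirrors struct pdesc in esi.c; this struct name is invented for illustration):

	/* ia64 function descriptor, as dereferenced by esi_call_phys:
	 *   ld8 r2=[in0],8   loads fd->addr (entry point, branched to via b6)
	 *   ld8 gp=[in0]     loads fd->gp   (callee's global pointer)
	 */
	struct ia64_fdesc_sketch {
		void *addr;	/* entry point of the ESI procedure */
		void *gp;	/* global pointer the procedure runs with */
	};

	/* The second argument is an array of the eight call parameters:
	 *   u64 esi_params[8];  loaded into out0..out7 by the stub
	 */
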
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 3ead20fb6f4b..879c1817bd1c 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -105,5 +105,9 @@ EXPORT_SYMBOL(ia64_spinlock_contention);
 # endif
 #endif
 
+#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
+extern void esi_call_phys (void);
+EXPORT_SYMBOL_GPL(esi_call_phys);
+#endif
 extern char ia64_ivt[];
 EXPORT_SYMBOL(ia64_ivt);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 781960f80b6f..169ec3a7156c 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -136,10 +136,8 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
 static int __kprobes unsupported_inst(uint template, uint  slot,
 				      uint major_opcode,
 				      unsigned long kprobe_inst,
-				      struct kprobe *p)
+				      unsigned long addr)
 {
-	unsigned long addr = (unsigned long)p->addr;
-
 	if (bundle_encoding[template][slot] == I) {
 		switch (major_opcode) {
 			case 0x0: //I_UNIT_MISC_OPCODE:
@@ -217,7 +215,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
 			       struct kprobe *p)
 {
 	unsigned long break_inst = BREAK_INST;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
+	bundle_t *bundle = &p->opcode.bundle;
 
 	/*
 	 * Copy the original kprobe_inst qualifying predicate(qp)
@@ -423,11 +421,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
 	unsigned long kprobe_inst=0;
 	unsigned int slot = addr & 0xf, template, major_opcode = 0;
-	bundle_t *bundle = &p->ainsn.insn.bundle;
-
-	memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
-	memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+	bundle_t *bundle;
 
+	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
 	template = bundle->quad0.template;
 
 	if(valid_kprobe_addr(template, slot, addr))
@@ -440,20 +436,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	/* Get kprobe_inst and major_opcode from the bundle */
 	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
 
-	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
+	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
 		return -EINVAL;
 
-	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
 
-	return 0;
-}
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn)
+		return -ENOMEM;
+	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
+	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
 
-void __kprobes flush_insn_slot(struct kprobe *p)
-{
-	unsigned long arm_addr;
+	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
 
-	arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL;
-	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -461,9 +456,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
 
-	flush_insn_slot(p);
-	memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
-	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+	flush_icache_range((unsigned long)p->ainsn.insn,
+			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+	memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
+	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
@@ -471,11 +467,18 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 	unsigned long addr = (unsigned long)p->addr;
 	unsigned long arm_addr = addr & ~0xFULL;
 
-	/* p->opcode contains the original unaltered bundle */
-	memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
-	flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
+	memcpy((char *) arm_addr, (char *) p->ainsn.insn,
+					 sizeof(kprobe_opcode_t));
+	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	mutex_lock(&kprobe_mutex);
+	free_insn_slot(p->ainsn.insn);
+	mutex_unlock(&kprobe_mutex);
+}
 /*
  * We are resuming execution after a single step fault, so the pt_regs
  * structure reflects the register state after we executed the instruction
@@ -486,12 +489,12 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
  */
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
+	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
 	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
 	unsigned long template;
 	int slot = ((unsigned long)p->addr & 0xf);
 
-	template = p->opcode.bundle.quad0.template;
+	template = p->ainsn.insn->bundle.quad0.template;
 
 	if (slot == 1 && bundle_encoding[template][1] == L)
 		slot = 2;
@@ -553,7 +556,7 @@ turn_ss_off:
 
 static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
 {
-	unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
+	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
 	unsigned long slot = (unsigned long)p->addr & 0xf;
 
 	/* single step inline if break instruction */
@@ -768,6 +771,12 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 	 */
 	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
 		return 1;
+	/*
+	 * In case the user-specified fault handler returned
+	 * zero, try to fix up.
+	 */
+	if (ia64_done_with_exception(regs))
+		return 1;
 
 	/*
 	 * Let ia64_do_page_fault() fix it.
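
The net effect of the kprobes changes is that p->ainsn.insn now points at an executable slot obtained from get_insn_slot() and released in the new arch_remove_kprobe(), instead of a bundle copy embedded in struct kprobe. The registration API is unchanged; a minimal consumer, roughly along the lines of Documentation/kprobes.txt of that era, looks like this (the probe target is chosen for illustration only):

	#include <linux/module.h>
	#include <linux/kprobes.h>
	#include <linux/kallsyms.h>

	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
		return 0;	/* let the probed instruction run */
	}

	static struct kprobe kp;

	static int __init kprobe_example_init(void)
	{
		/* illustrative target; on ia64 the address is bundle+slot */
		kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
		if (!kp.addr)
			return -EINVAL;
		kp.pre_handler = handler_pre;
		return register_kprobe(&kp);
	}

	static void __exit kprobe_example_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(kprobe_example_init);
	module_exit(kprobe_example_exit);
	MODULE_LICENSE("GPL");
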
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 2fbe4536fe18..bfbd8986153b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -54,6 +54,9 @@
  *
  * 2005-10-07 Keith Owens <kaos@sgi.com>
  *	      Add notify_die() hooks.
+ *
+ * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *	      Add printing support for MCA/INIT.
  */
 #include <linux/types.h>
 #include <linux/init.h>
@@ -136,11 +139,175 @@ extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
 
 static int mca_init __initdata;
 
+/*
+ * limited & delayed printing support for MCA/INIT handler
+ */
+
+#define mprintk(fmt...) ia64_mca_printk(fmt)
+
+#define MLOGBUF_SIZE (512+256*NR_CPUS)
+#define MLOGBUF_MSGMAX 256
+static char mlogbuf[MLOGBUF_SIZE];
+static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
+static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
+static unsigned long mlogbuf_start;
+static unsigned long mlogbuf_end;
+static unsigned int mlogbuf_finished = 0;
+static unsigned long mlogbuf_timestamp = 0;
+
+static int loglevel_save = -1;
+#define BREAK_LOGLEVEL(__console_loglevel)		\
+	oops_in_progress = 1;				\
+	if (loglevel_save < 0)				\
+		loglevel_save = __console_loglevel;	\
+	__console_loglevel = 15;
+
+#define RESTORE_LOGLEVEL(__console_loglevel)		\
+	if (loglevel_save >= 0) {			\
+		__console_loglevel = loglevel_save;	\
+		loglevel_save = -1;			\
+	}						\
+	mlogbuf_finished = 0;				\
+	oops_in_progress = 0;
+
+/*
+ * Push messages into buffer, print them later if not urgent.
+ */
+void ia64_mca_printk(const char *fmt, ...)
+{
+	va_list args;
+	int printed_len;
+	char temp_buf[MLOGBUF_MSGMAX];
+	char *p;
+
+	va_start(args, fmt);
+	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
+	va_end(args);
+
+	/* Copy the output into mlogbuf */
+	if (oops_in_progress) {
+		/* mlogbuf was abandoned, use printk directly instead. */
+		printk(temp_buf);
+	} else {
+		spin_lock(&mlogbuf_wlock);
+		for (p = temp_buf; *p; p++) {
+			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
+			if (next != mlogbuf_start) {
+				mlogbuf[mlogbuf_end] = *p;
+				mlogbuf_end = next;
+			} else {
+				/* buffer full */
+				break;
+			}
+		}
+		mlogbuf[mlogbuf_end] = '\0';
+		spin_unlock(&mlogbuf_wlock);
+	}
+}
+EXPORT_SYMBOL(ia64_mca_printk);
+
+/*
+ * Print buffered messages.
+ *  NOTE: call this after returning normal context. (ex. from salinfod)
+ */
+void ia64_mlogbuf_dump(void)
+{
+	char temp_buf[MLOGBUF_MSGMAX];
+	char *p;
+	unsigned long index;
+	unsigned long flags;
+	unsigned int printed_len;
+
+	/* Get output from mlogbuf */
+	while (mlogbuf_start != mlogbuf_end) {
+		temp_buf[0] = '\0';
+		p = temp_buf;
+		printed_len = 0;
+
+		spin_lock_irqsave(&mlogbuf_rlock, flags);
+
+		index = mlogbuf_start;
+		while (index != mlogbuf_end) {
+			*p = mlogbuf[index];
+			index = (index + 1) % MLOGBUF_SIZE;
+			if (!*p)
+				break;
+			p++;
+			if (++printed_len >= MLOGBUF_MSGMAX - 1)
+				break;
+		}
+		*p = '\0';
+		if (temp_buf[0])
+			printk(temp_buf);
+		mlogbuf_start = index;
+
+		mlogbuf_timestamp = 0;
+		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
+	}
+}
+EXPORT_SYMBOL(ia64_mlogbuf_dump);
+
+/*
+ * Call this if the system is going down or if messages must be flushed to
+ * the console immediately (e.g. recovery failed, a crash dump is about to
+ * be invoked, or a long-wait rendezvous).
+ * NOTE: this should be called from the monarch.
+ */
+static void ia64_mlogbuf_finish(int wait)
+{
+	BREAK_LOGLEVEL(console_loglevel);
+
+	spin_lock_init(&mlogbuf_rlock);
+	ia64_mlogbuf_dump();
+	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
+		"MCA/INIT might be dodgy or fail.\n");
+
+	if (!wait)
+		return;
+
+	/* wait for console */
+	printk("Delaying for 5 seconds...\n");
+	udelay(5*1000000);
+
+	mlogbuf_finished = 1;
+}
+EXPORT_SYMBOL(ia64_mlogbuf_finish);
+
+/*
+ * Print buffered messages from INIT context.
+ */
+static void ia64_mlogbuf_dump_from_init(void)
+{
+	if (mlogbuf_finished)
+		return;
+
+	if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
+		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
+			"and the system seems to be messed up.\n");
+		ia64_mlogbuf_finish(0);
+		return;
+	}
+
+	if (!spin_trylock(&mlogbuf_rlock)) {
+		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
+			"Generated messages other than stack dump will be "
+			"buffered to mlogbuf and will be printed later.\n");
+		printk(KERN_ERR "INIT: If messages are not printed after "
+			"this INIT, wait 30 seconds and assert INIT again.\n");
+		if (!mlogbuf_timestamp)
+			mlogbuf_timestamp = jiffies;
+		return;
+	}
+	spin_unlock(&mlogbuf_rlock);
+	ia64_mlogbuf_dump();
+}
 
 static void inline
 ia64_mca_spin(const char *func)
 {
-	printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
+	if (monarch_cpu == smp_processor_id())
+		ia64_mlogbuf_finish(0);
+	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
 	while (1)
 		cpu_relax();
 }
@@ -344,9 +511,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CPE error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
-
 	spin_lock(&cpe_history_lock);
 	if (!cpe_poll_enabled && cpe_vector >= 0) {
 
@@ -375,7 +539,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cpe_history[index++] = now;
 			if (index == CPE_HISTORY_LENGTH)
@@ -383,6 +547,10 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cpe_history_lock);
+out:
+	/* Get the CPE error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+
 	return IRQ_HANDLED;
 }
 
@@ -988,18 +1156,22 @@ ia64_wait_for_slaves(int monarch, const char *type)
 	}
 	if (!missing)
 		goto all_in;
-	printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
+	/*
+	 * Maybe slave(s) dead. Print buffered messages immediately.
+	 */
+	ia64_mlogbuf_finish(0);
+	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
 	for_each_online_cpu(c) {
 		if (c == monarch)
 			continue;
 		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
-			printk(" %d", c);
+			mprintk(" %d", c);
 	}
-	printk("\n");
+	mprintk("\n");
 	return;
 
 all_in:
-	printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
+	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
 	return;
 }
 
@@ -1027,10 +1199,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
-	console_loglevel = 15;	/* make sure printks make it to console */
-	printk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d monarch=%ld\n",
-		sos->proc_state_param, cpu, sos->monarch);
+	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
+		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
 
 	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
 	monarch_cpu = cpu;
@@ -1066,6 +1236,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		rh->severity = sal_log_severity_corrected;
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
 		sos->os_status = IA64_MCA_CORRECTED;
+	} else {
+		/* Dump buffered message to console */
+		ia64_mlogbuf_finish(1);
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
@@ -1106,9 +1279,6 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 	/* SAL spec states this should run w/ interrupts enabled */
 	local_irq_enable();
 
-	/* Get the CMC error record and log it */
-	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
-
 	spin_lock(&cmc_history_lock);
 	if (!cmc_polling_enabled) {
 		int i, count = 1; /* we know 1 happened now */
@@ -1141,7 +1311,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 
 			/* lock already released, get out now */
-			return IRQ_HANDLED;
+			goto out;
 		} else {
 			cmc_history[index++] = now;
 			if (index == CMC_HISTORY_LENGTH)
@@ -1149,6 +1319,10 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 		}
 	}
 	spin_unlock(&cmc_history_lock);
+out:
+	/* Get the CMC error record and log it */
+	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+
 	return IRQ_HANDLED;
 }
 
@@ -1305,6 +1479,15 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
 	struct task_struct *g, *t;
 	if (val != DIE_INIT_MONARCH_PROCESS)
 		return NOTIFY_DONE;
+
+	/*
+	 * FIXME: mlogbuf will brim over with INIT stack dumps.
+	 * To enable show_stack from INIT, we use oops_in_progress which should
+	 * be used in real oops. This would cause something wrong after INIT.
+	 */
+	BREAK_LOGLEVEL(console_loglevel);
+	ia64_mlogbuf_dump_from_init();
+
 	printk(KERN_ERR "Processes interrupted by INIT -");
 	for_each_online_cpu(c) {
 		struct ia64_sal_os_state *s;
@@ -1326,6 +1509,8 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
 		} while_each_thread (g, t);
 		read_unlock(&tasklist_lock);
 	}
+	/* FIXME: This will not restore zapped printk locks. */
+	RESTORE_LOGLEVEL(console_loglevel);
 	return NOTIFY_DONE;
 }
 
@@ -1357,12 +1542,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
-	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
-	console_loglevel = 15;	/* make sure printks make it to console */
-
 	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
 
-	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
+	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
 		sos->proc_state_param, cpu, sos->monarch);
 	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
 
@@ -1375,7 +1557,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * fix their proms and get their customers updated.
 	 */
 	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
-		printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
+		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
 		       __FUNCTION__, cpu);
 		atomic_dec(&slaves);
 		sos->monarch = 1;
@@ -1387,7 +1569,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * fix their proms and get their customers updated.
 	 */
 	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
-		printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
+		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
 		       __FUNCTION__, cpu);
 		atomic_dec(&monarchs);
 		sos->monarch = 0;
@@ -1408,7 +1590,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 				== NOTIFY_STOP)
 			ia64_mca_spin(__FUNCTION__);
-		printk("Slave on cpu %d returning to normal service.\n", cpu);
+		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
 		set_curr_task(cpu, previous_current);
 		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 		atomic_dec(&slaves);
@@ -1426,7 +1608,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	 * same serial line, the user will need some time to switch out of the BMC before
 	 * the dump begins.
 	 */
-	printk("Delaying for 5 seconds...\n");
+	mprintk("Delaying for 5 seconds...\n");
 	udelay(5*1000000);
 	ia64_wait_for_slaves(cpu, "INIT");
 	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
@@ -1439,7 +1621,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
-	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
+	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
 	atomic_dec(&monarchs);
 	set_curr_task(cpu, previous_current);
 	monarch_cpu = -1;
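
The mlogbuf added above is a single-reader/single-writer ring buffer: the MCA-context writer advances mlogbuf_end, the normal-context reader advances mlogbuf_start, and the buffer counts as full when advancing the end would collide with the start, so one slot is always left empty. A standalone sketch of that invariant (the variable names match the patch; the demo around them is invented):

	#include <stdio.h>

	#define MLOGBUF_SIZE 16			/* tiny, to show wrap-around */

	static char mlogbuf[MLOGBUF_SIZE];
	static unsigned long mlogbuf_start, mlogbuf_end;

	/* returns 0 if the buffer was full and the byte was dropped */
	static int mlog_put(char c)
	{
		unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
		if (next == mlogbuf_start)
			return 0;	/* full: at most SIZE-1 bytes held */
		mlogbuf[mlogbuf_end] = c;
		mlogbuf_end = next;
		return 1;
	}

	static int mlog_get(char *c)
	{
		if (mlogbuf_start == mlogbuf_end)
			return 0;	/* empty */
		*c = mlogbuf[mlogbuf_start];
		mlogbuf_start = (mlogbuf_start + 1) % MLOGBUF_SIZE;
		return 1;
	}

	int main(void)
	{
		const char *msg = "MCA: spinning here\n";
		char c;

		while (*msg)
			mlog_put(*msg++);	/* MCA context: buffer only */
		while (mlog_get(&c))		/* normal context: drain */
			putchar(c);
		return 0;
	}
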
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 96047491d1b9..c6b607c00dee 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -1025,18 +1025,13 @@ ia64_old_stack:
 
 ia64_set_kernel_registers:
 	add temp3=MCA_SP_OFFSET, r3
-	add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3
 	mov b0=r2		// save return address
 	GET_IA64_MCA_DATA(temp1)
 	;;
-	add temp4=temp4, temp1	// &struct ia64_sal_os_state.os_gp
 	add r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
 	add r13=temp1, r3	// set current to start of MCA/INIT stack
 	add r20=temp1, r3	// physical start of MCA/INIT stack
 	;;
-	ld8 r1=[temp4]		// OS GP from SAL OS state
-	;;
-	DATA_PA_TO_VA(r1,temp1)
 	DATA_PA_TO_VA(r12,temp2)
 	DATA_PA_TO_VA(r13,temp3)
 	;;
@@ -1067,6 +1062,10 @@ ia64_set_kernel_registers:
 	mov cr.itir=r18
 	mov cr.ifa=r13
 	mov r20=IA64_TR_CURRENT_STACK
+
+	movl r17=FPSR_DEFAULT
+	;;
+	mov.m ar.fpsr=r17	// set ar.fpsr to kernel default value
 	;;
 	itr.d dtr[r20]=r21
 	;;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 8db6e0cedadc..a45009d2bc90 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -79,14 +79,30 @@ static int
 fatal_mca(const char *fmt, ...)
 {
 	va_list args;
+	char buf[256];
 
 	va_start(args, fmt);
-	vprintk(fmt, args);
+	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
+	ia64_mca_printk(KERN_ALERT "MCA: %s\n", buf);
 
 	return MCA_NOT_RECOVERED;
 }
 
+static int
+mca_recovered(const char *fmt, ...)
+{
+	va_list args;
+	char buf[256];
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+	ia64_mca_printk(KERN_INFO "MCA: %s\n", buf);
+
+	return MCA_RECOVERED;
+}
+
 /**
  * mca_page_isolate - isolate a poisoned page in order not to use it later
  * @paddr:	poisoned memory location
@@ -140,6 +156,7 @@ mca_page_isolate(unsigned long paddr)
 void
 mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
 {
+	ia64_mlogbuf_dump();
 	printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
 		"iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
 	       raw_smp_processor_id(), current->pid, current->uid,
@@ -440,7 +457,7 @@ recover_from_read_error(slidx_table_t *slidx,
 
 	/* Is target address valid? */
 	if (!pbci->tv)
-		return fatal_mca(KERN_ALERT "MCA: target address not valid\n");
+		return fatal_mca("target address not valid");
 
 	/*
 	 * cpu read or memory-mapped io read
@@ -458,7 +475,7 @@ recover_from_read_error(slidx_table_t *slidx,
458 475
459 /* Is minstate valid? */ 476 /* Is minstate valid? */
460 if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate)) 477 if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
461 return fatal_mca(KERN_ALERT "MCA: minstate not valid\n"); 478 return fatal_mca("minstate not valid");
462 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr); 479 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
463 psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr); 480 psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
464 481
@@ -492,13 +509,14 @@ recover_from_read_error(slidx_table_t *slidx,
 		psr2->bn  = 1;
 		psr2->i  = 0;
 
-		return MCA_RECOVERED;
+		return mca_recovered("user memory corruption. "
+				"kill affected process - recovered.");
 	}
 
 	}
 
-	return fatal_mca(KERN_ALERT "MCA: kernel context not recovered,"
-			 " iip 0x%lx\n", pmsa->pmsa_iip);
+	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
+			 pmsa->pmsa_iip);
 }
 
 /**
@@ -584,13 +602,13 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
 	 * The machine check is corrected.
 	 */
 	if (psp->cm == 1)
-		return MCA_RECOVERED;
+		return mca_recovered("machine check is already corrected.");
 
 	/*
 	 * The error was not contained. Software must be reset.
 	 */
 	if (psp->us || psp->ci == 0)
-		return fatal_mca(KERN_ALERT "MCA: error not contained\n");
+		return fatal_mca("error not contained");
 
 	/*
 	 * The cache check and bus check bits have four possible states
@@ -601,22 +619,22 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
 	 *    1      1	Memory error, attempt recovery
 	 */
 	if (psp->bc == 0 || pbci == NULL)
-		return fatal_mca(KERN_ALERT "MCA: No bus check\n");
+		return fatal_mca("No bus check");
 
 	/*
 	 * Sorry, we cannot handle so many.
 	 */
 	if (peidx_bus_check_num(peidx) > 1)
-		return fatal_mca(KERN_ALERT "MCA: Too many bus checks\n");
+		return fatal_mca("Too many bus checks");
 	/*
 	 * Well, here is only one bus error.
 	 */
 	if (pbci->ib)
-		return fatal_mca(KERN_ALERT "MCA: Internal Bus error\n");
+		return fatal_mca("Internal Bus error");
 	if (pbci->cc)
-		return fatal_mca(KERN_ALERT "MCA: Cache-cache error\n");
+		return fatal_mca("Cache-cache error");
 	if (pbci->eb && pbci->bsi > 0)
-		return fatal_mca(KERN_ALERT "MCA: External bus check fatal status\n");
+		return fatal_mca("External bus check fatal status");
 
 	/*
 	 * This is a local MCA and estimated as a recoverable external bus error.
@@ -628,7 +646,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
 	/*
 	 * On account of strange SAL error record, we cannot recover.
 	 */
-	return fatal_mca(KERN_ALERT "MCA: Strange SAL record\n");
+	return fatal_mca("Strange SAL record");
 }
 
 /**
@@ -657,10 +675,10 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
 
 	/* Now, OS can recover when there is one processor error section */
 	if (n_proc_err > 1)
-		return fatal_mca(KERN_ALERT "MCA: Too Many Errors\n");
+		return fatal_mca("Too Many Errors");
 	else if (n_proc_err == 0)
-		/* Weird SAL record ... We need not to recover */
-		return fatal_mca(KERN_ALERT "MCA: Weird SAL record\n");
+		/* Weird SAL record ... We can't do anything */
+		return fatal_mca("Weird SAL record");
 
 	/* Make index of processor error section */
 	mca_make_peidx((sal_log_processor_info_t*)
@@ -671,7 +689,7 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
 
 	/* Check whether MCA is global or not */
 	if (is_mca_global(&peidx, &pbci, sos))
-		return fatal_mca(KERN_ALERT "MCA: global MCA\n");
+		return fatal_mca("global MCA");
 
 	/* Try to recover a processor error */
 	return recover_from_processor_error(platform_err, &slidx, &peidx,
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 31a2e52bb16f..c85e943ba5fd 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -118,3 +118,7 @@ struct mca_table_entry {
 
 extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
 extern int mca_recover_range(unsigned long);
+extern void ia64_mca_printk(const char * fmt, ...)
+	 __attribute__ ((format (printf, 1, 2)));
+extern void ia64_mlogbuf_dump(void);
+
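
The format(printf, 1, 2) attribute on ia64_mca_printk() tells gcc that argument 1 is the format string and the variadic arguments start at position 2, so callers are type-checked just like callers of printk(). A hypothetical mismatch it would catch at compile time:

	/* gcc warns: format '%d' expects int, but the argument is long */
	long paddr = 0xdeadbeef;
	ia64_mca_printk(KERN_ERR "bad page at %d\n", paddr);

	/* correct */
	ia64_mca_printk(KERN_ERR "bad page at %lx\n", paddr);
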
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7bb7696e4ce2..281004ff7b00 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -63,6 +63,9 @@
 
 #define PFM_INVALID_ACTIVATION	(~0UL)
 
+#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
+#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */
+
 /*
  * depth of message queue
  */
@@ -297,14 +300,17 @@ typedef struct pfm_context {
 	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
 	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */
 
-	unsigned long		ctx_pmcs[IA64_NUM_PMC_REGS];	/* saved copies of PMC values */
+	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */
 
 	unsigned int		ctx_used_ibrs[1];		/* bitmask of used IBR (speedup ctxsw in) */
 	unsigned int		ctx_used_dbrs[1];		/* bitmask of used DBR (speedup ctxsw in) */
 	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
 	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */
 
-	pfm_counter_t		ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */
+	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
+
+	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
+	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */
 
 	u64			ctx_saved_psr_up;	/* only contains psr.up value */
 
@@ -868,7 +874,6 @@ static void
 pfm_mask_monitoring(struct task_struct *task)
 {
 	pfm_context_t *ctx = PFM_GET_CTX(task);
-	struct thread_struct *th = &task->thread;
 	unsigned long mask, val, ovfl_mask;
 	int i;
 
@@ -889,7 +894,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	 * So in both cases, the live register contains the owner's
 	 * state. We can ONLY touch the PMU registers and NOT the PSR.
 	 *
-	 * As a consequence to this call, the thread->pmds[] array
+	 * As a consequence to this call, the ctx->th_pmds[] array
 	 * contains stale information which must be ignored
 	 * when context is reloaded AND monitoring is active (see
 	 * pfm_restart).
@@ -924,9 +929,9 @@ pfm_mask_monitoring(struct task_struct *task)
 	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
 	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
 		if ((mask & 0x1) == 0UL) continue;
-		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
-		th->pmcs[i] &= ~0xfUL;
-		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
+		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
+		ctx->th_pmcs[i] &= ~0xfUL;
+		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
 	}
 	/*
 	 * make all of this visible
@@ -943,7 +948,6 @@ static void
 pfm_restore_monitoring(struct task_struct *task)
 {
 	pfm_context_t *ctx = PFM_GET_CTX(task);
-	struct thread_struct *th = &task->thread;
 	unsigned long mask, ovfl_mask;
 	unsigned long psr, val;
 	int i, is_system;
@@ -1009,9 +1013,9 @@ pfm_restore_monitoring(struct task_struct *task)
 	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
 	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
 		if ((mask & 0x1) == 0UL) continue;
-		th->pmcs[i] = ctx->ctx_pmcs[i];
-		ia64_set_pmc(i, th->pmcs[i]);
-		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
+		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
+		ia64_set_pmc(i, ctx->th_pmcs[i]);
+		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
 	}
 	ia64_srlz_d();
 
@@ -1070,7 +1074,6 @@ pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
 static inline void
 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
 {
-	struct thread_struct *thread = &task->thread;
 	unsigned long ovfl_val = pmu_conf->ovfl_val;
 	unsigned long mask = ctx->ctx_all_pmds[0];
 	unsigned long val;
@@ -1092,11 +1095,11 @@ pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
 			ctx->ctx_pmds[i].val = val & ~ovfl_val;
 			val &= ovfl_val;
 		}
-		thread->pmds[i] = val;
+		ctx->th_pmds[i] = val;
 
 		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
 			i,
-			thread->pmds[i],
+			ctx->th_pmds[i],
 			ctx->ctx_pmds[i].val));
 	}
 }
@@ -1107,7 +1110,6 @@ pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
 static inline void
 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
 {
-	struct thread_struct *thread = &task->thread;
 	unsigned long mask = ctx->ctx_all_pmcs[0];
 	int i;
 
@@ -1115,8 +1117,8 @@ pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
 
 	for (i=0; mask; i++, mask>>=1) {
 		/* masking 0 with ovfl_val yields 0 */
-		thread->pmcs[i] = ctx->ctx_pmcs[i];
-		DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
+		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
+		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
 	}
 }
 
@@ -2860,7 +2862,6 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
 static int
 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
-	struct thread_struct *thread = NULL;
 	struct task_struct *task;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
 	unsigned long value, pmc_pm;
@@ -2881,7 +2882,6 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	if (is_loaded) {
-		thread = &task->thread;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
@@ -3036,7 +3036,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 *
 		 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
 		 *
-		 * The value in thread->pmcs[] may be modified on overflow, i.e.,  when
+		 * The value in th_pmcs[] may be modified on overflow, i.e.,  when
 		 * monitoring needs to be stopped.
 		 */
 		if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
@@ -3050,7 +3050,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * write thread state
 		 */
-		if (is_system == 0) thread->pmcs[cnum] = value;
+		if (is_system == 0) ctx->th_pmcs[cnum] = value;
 
 		/*
 		 * write hardware register if we can
@@ -3102,7 +3102,6 @@ error:
 static int
 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
-	struct thread_struct *thread = NULL;
 	struct task_struct *task;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
 	unsigned long value, hw_value, ovfl_mask;
@@ -3126,7 +3125,6 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * the owner of the local PMU.
 	 */
 	if (likely(is_loaded)) {
-		thread = &task->thread;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
@@ -3234,7 +3232,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * write thread state
 		 */
-		if (is_system == 0) thread->pmds[cnum] = hw_value;
+		if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
 
 		/*
 		 * write hardware register if we can
@@ -3300,7 +3298,6 @@ abort_mission:
 static int
 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
-	struct thread_struct *thread = NULL;
 	struct task_struct *task;
 	unsigned long val = 0UL, lval, ovfl_mask, sval;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
@@ -3324,7 +3321,6 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3324 if (state == PFM_CTX_ZOMBIE) return -EINVAL; 3321 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3325 3322
3326 if (likely(is_loaded)) { 3323 if (likely(is_loaded)) {
3327 thread = &task->thread;
3328 /* 3324 /*
3329 * In system wide and when the context is loaded, access can only happen 3325 * In system wide and when the context is loaded, access can only happen
3330 * when the caller is running on the CPU being monitored by the session. 3326 * when the caller is running on the CPU being monitored by the session.
@@ -3386,7 +3382,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3386 * if context is zombie, then task does not exist anymore. 3382 * if context is zombie, then task does not exist anymore.
3387 * In this case, we use the full value saved in the context (pfm_flush_regs()). 3383 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3388 */ 3384 */
3389 val = is_loaded ? thread->pmds[cnum] : 0UL; 3385 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3390 } 3386 }
3391 rd_func = pmu_conf->pmd_desc[cnum].read_check; 3387 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3392 3388
@@ -4355,8 +4351,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4355 pfm_copy_pmds(task, ctx); 4351 pfm_copy_pmds(task, ctx);
4356 pfm_copy_pmcs(task, ctx); 4352 pfm_copy_pmcs(task, ctx);
4357 4353
4358 pmcs_source = thread->pmcs; 4354 pmcs_source = ctx->th_pmcs;
4359 pmds_source = thread->pmds; 4355 pmds_source = ctx->th_pmds;
4360 4356
4361 /* 4357 /*
4362 * always the case for system-wide 4358 * always the case for system-wide
@@ -5865,14 +5861,12 @@ void
5865pfm_save_regs(struct task_struct *task) 5861pfm_save_regs(struct task_struct *task)
5866{ 5862{
5867 pfm_context_t *ctx; 5863 pfm_context_t *ctx;
5868 struct thread_struct *t;
5869 unsigned long flags; 5864 unsigned long flags;
5870 u64 psr; 5865 u64 psr;
5871 5866
5872 5867
5873 ctx = PFM_GET_CTX(task); 5868 ctx = PFM_GET_CTX(task);
5874 if (ctx == NULL) return; 5869 if (ctx == NULL) return;
5875 t = &task->thread;
5876 5870
5877 /* 5871 /*
5878 * we always come here with interrupts ALREADY disabled by 5872 * we always come here with interrupts ALREADY disabled by
@@ -5930,19 +5924,19 @@ pfm_save_regs(struct task_struct *task)
5930 * guarantee we will be scheduled at that same 5924 * guarantee we will be scheduled at that same
5931 * CPU again. 5925 * CPU again.
5932 */ 5926 */
5933 pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]); 5927 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5934 5928
5935 /* 5929 /*
5936 * save pmc0; ia64_srlz_d() already done in pfm_save_pmds() 5930 * save pmc0; ia64_srlz_d() already done in pfm_save_pmds()
5937 * we will need it on the restore path to check 5931 * we will need it on the restore path to check
5938 * for pending overflow. 5932 * for pending overflow.
5939 */ 5933 */
5940 t->pmcs[0] = ia64_get_pmc(0); 5934 ctx->th_pmcs[0] = ia64_get_pmc(0);
5941 5935
5942 /* 5936 /*
5943 * unfreeze PMU if had pending overflows 5937 * unfreeze PMU if had pending overflows
5944 */ 5938 */
5945 if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); 5939 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5946 5940
5947 /* 5941 /*
5948 * finally, allow context access. 5942 * finally, allow context access.
@@ -5987,7 +5981,6 @@ static void
5987pfm_lazy_save_regs (struct task_struct *task) 5981pfm_lazy_save_regs (struct task_struct *task)
5988{ 5982{
5989 pfm_context_t *ctx; 5983 pfm_context_t *ctx;
5990 struct thread_struct *t;
5991 unsigned long flags; 5984 unsigned long flags;
5992 5985
5993 { u64 psr = pfm_get_psr(); 5986 { u64 psr = pfm_get_psr();
@@ -5995,7 +5988,6 @@ pfm_lazy_save_regs (struct task_struct *task)
5995 } 5988 }
5996 5989
5997 ctx = PFM_GET_CTX(task); 5990 ctx = PFM_GET_CTX(task);
5998 t = &task->thread;
5999 5991
6000 /* 5992 /*
6001 * we need to mask PMU overflow here to 5993 * we need to mask PMU overflow here to
@@ -6020,19 +6012,19 @@ pfm_lazy_save_regs (struct task_struct *task)
6020 /* 6012 /*
6021 * save all the pmds we use 6013 * save all the pmds we use
6022 */ 6014 */
6023 pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]); 6015 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6024 6016
6025 /* 6017 /*
6026 * save pmc0; ia64_srlz_d() already done in pfm_save_pmds() 6018 * save pmc0; ia64_srlz_d() already done in pfm_save_pmds()
6027 * it is needed to check for pending overflow 6019 * it is needed to check for pending overflow
6028 * on the restore path 6020 * on the restore path
6029 */ 6021 */
6030 t->pmcs[0] = ia64_get_pmc(0); 6022 ctx->th_pmcs[0] = ia64_get_pmc(0);
6031 6023
6032 /* 6024 /*
6033 * unfreeze PMU if had pending overflows 6025 * unfreeze PMU if had pending overflows
6034 */ 6026 */
6035 if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); 6027 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6036 6028
6037 /* 6029 /*
6038 * now we can unmask PMU interrupts; they will 6030 * now we can unmask PMU interrupts; they will
@@ -6051,7 +6043,6 @@ void
6051pfm_load_regs (struct task_struct *task) 6043pfm_load_regs (struct task_struct *task)
6052{ 6044{
6053 pfm_context_t *ctx; 6045 pfm_context_t *ctx;
6054 struct thread_struct *t;
6055 unsigned long pmc_mask = 0UL, pmd_mask = 0UL; 6046 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6056 unsigned long flags; 6047 unsigned long flags;
6057 u64 psr, psr_up; 6048 u64 psr, psr_up;
@@ -6062,11 +6053,10 @@ pfm_load_regs (struct task_struct *task)
6062 6053
6063 BUG_ON(GET_PMU_OWNER()); 6054 BUG_ON(GET_PMU_OWNER());
6064 6055
6065 t = &task->thread;
6066 /* 6056 /*
6067 * possible on unload 6057 * possible on unload
6068 */ 6058 */
6069 if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return; 6059 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6070 6060
6071 /* 6061 /*
6072 * we always come here with interrupts ALREADY disabled by 6062 * we always come here with interrupts ALREADY disabled by
@@ -6148,21 +6138,21 @@ pfm_load_regs (struct task_struct *task)
6148 * 6138 *
6149 * XXX: optimize here 6139 * XXX: optimize here
6150 */ 6140 */
6151 if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask); 6141 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6152 if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask); 6142 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6153 6143
6154 /* 6144 /*
6155 * check for pending overflow at the time the state 6145 * check for pending overflow at the time the state
6156 * was saved. 6146 * was saved.
6157 */ 6147 */
6158 if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) { 6148 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6159 /* 6149 /*
6160 * reload pmc0 with the overflow information 6150 * reload pmc0 with the overflow information
6161 * On McKinley PMU, this will trigger a PMU interrupt 6151 * On McKinley PMU, this will trigger a PMU interrupt
6162 */ 6152 */
6163 ia64_set_pmc(0, t->pmcs[0]); 6153 ia64_set_pmc(0, ctx->th_pmcs[0]);
6164 ia64_srlz_d(); 6154 ia64_srlz_d();
6165 t->pmcs[0] = 0UL; 6155 ctx->th_pmcs[0] = 0UL;
6166 6156
6167 /* 6157 /*
6168 * will replay the PMU interrupt 6158 * will replay the PMU interrupt
@@ -6215,7 +6205,6 @@ pfm_load_regs (struct task_struct *task)
6215void 6205void
6216pfm_load_regs (struct task_struct *task) 6206pfm_load_regs (struct task_struct *task)
6217{ 6207{
6218 struct thread_struct *t;
6219 pfm_context_t *ctx; 6208 pfm_context_t *ctx;
6220 struct task_struct *owner; 6209 struct task_struct *owner;
6221 unsigned long pmd_mask, pmc_mask; 6210 unsigned long pmd_mask, pmc_mask;
@@ -6224,7 +6213,6 @@ pfm_load_regs (struct task_struct *task)
6224 6213
6225 owner = GET_PMU_OWNER(); 6214 owner = GET_PMU_OWNER();
6226 ctx = PFM_GET_CTX(task); 6215 ctx = PFM_GET_CTX(task);
6227 t = &task->thread;
6228 psr = pfm_get_psr(); 6216 psr = pfm_get_psr();
6229 6217
6230 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); 6218 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
@@ -6287,22 +6275,22 @@ pfm_load_regs (struct task_struct *task)
6287 */ 6275 */
6288 pmc_mask = ctx->ctx_all_pmcs[0]; 6276 pmc_mask = ctx->ctx_all_pmcs[0];
6289 6277
6290 pfm_restore_pmds(t->pmds, pmd_mask); 6278 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6291 pfm_restore_pmcs(t->pmcs, pmc_mask); 6279 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6292 6280
6293 /* 6281 /*
6294 * check for pending overflow at the time the state 6282 * check for pending overflow at the time the state
6295 * was saved. 6283 * was saved.
6296 */ 6284 */
6297 if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) { 6285 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6298 /* 6286 /*
6299 * reload pmc0 with the overflow information 6287 * reload pmc0 with the overflow information
6300 * On McKinley PMU, this will trigger a PMU interrupt 6288 * On McKinley PMU, this will trigger a PMU interrupt
6301 */ 6289 */
6302 ia64_set_pmc(0, t->pmcs[0]); 6290 ia64_set_pmc(0, ctx->th_pmcs[0]);
6303 ia64_srlz_d(); 6291 ia64_srlz_d();
6304 6292
6305 t->pmcs[0] = 0UL; 6293 ctx->th_pmcs[0] = 0UL;
6306 6294
6307 /* 6295 /*
6308 * will replay the PMU interrupt 6296 * will replay the PMU interrupt
@@ -6377,11 +6365,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6377 */ 6365 */
6378 pfm_unfreeze_pmu(); 6366 pfm_unfreeze_pmu();
6379 } else { 6367 } else {
6380 pmc0 = task->thread.pmcs[0]; 6368 pmc0 = ctx->th_pmcs[0];
6381 /* 6369 /*
6382 * clear whatever overflow status bits there were 6370 * clear whatever overflow status bits there were
6383 */ 6371 */
6384 task->thread.pmcs[0] = 0; 6372 ctx->th_pmcs[0] = 0;
6385 } 6373 }
6386 ovfl_val = pmu_conf->ovfl_val; 6374 ovfl_val = pmu_conf->ovfl_val;
6387 /* 6375 /*
@@ -6402,7 +6390,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6402 /* 6390 /*
6403 * can_access_pmu is always true in system-wide mode 6391 * can_access_pmu is always true in system-wide mode
6404 */ 6392 */
6405 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i]; 6393 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6406 6394
6407 if (PMD_IS_COUNTING(i)) { 6395 if (PMD_IS_COUNTING(i)) {
6408 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", 6396 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
@@ -6434,7 +6422,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6434 6422
6435 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val)); 6423 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
6436 6424
6437 if (is_self) task->thread.pmds[i] = pmd_val; 6425 if (is_self) ctx->th_pmds[i] = pmd_val;
6438 6426
6439 ctx->ctx_pmds[i].val = val; 6427 ctx->ctx_pmds[i].val = val;
6440 } 6428 }
@@ -6678,7 +6666,7 @@ pfm_init(void)
6678 ffz(pmu_conf->ovfl_val)); 6666 ffz(pmu_conf->ovfl_val));
6679 6667
6680 /* sanity check */ 6668 /* sanity check */
6681 if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) { 6669 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6682 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); 6670 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6683 pmu_conf = NULL; 6671 pmu_conf = NULL;
6684 return -1; 6672 return -1;
@@ -6753,7 +6741,6 @@ void
6753dump_pmu_state(const char *from) 6741dump_pmu_state(const char *from)
6754{ 6742{
6755 struct task_struct *task; 6743 struct task_struct *task;
6756 struct thread_struct *t;
6757 struct pt_regs *regs; 6744 struct pt_regs *regs;
6758 pfm_context_t *ctx; 6745 pfm_context_t *ctx;
6759 unsigned long psr, dcr, info, flags; 6746 unsigned long psr, dcr, info, flags;
@@ -6798,16 +6785,14 @@ dump_pmu_state(const char *from)
6798 ia64_psr(regs)->up = 0; 6785 ia64_psr(regs)->up = 0;
6799 ia64_psr(regs)->pp = 0; 6786 ia64_psr(regs)->pp = 0;
6800 6787
6801 t = &current->thread;
6802
6803 for (i=1; PMC_IS_LAST(i) == 0; i++) { 6788 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6804 if (PMC_IS_IMPL(i) == 0) continue; 6789 if (PMC_IS_IMPL(i) == 0) continue;
6805 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]); 6790 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6806 } 6791 }
6807 6792
6808 for (i=1; PMD_IS_LAST(i) == 0; i++) { 6793 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6809 if (PMD_IS_IMPL(i) == 0) continue; 6794 if (PMD_IS_IMPL(i) == 0) continue;
6810 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]); 6795 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6811 } 6796 }
6812 6797
6813 if (ctx) { 6798 if (ctx) {
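
Note on the perfmon.c hunks above: the per-task PMC/PMD shadow arrays move out of struct thread_struct and into the perfmon context, so every save, restore, and flush path now reads and writes ctx->th_pmcs[] and ctx->th_pmds[]. A minimal sketch of the resulting save path, assuming illustrative names (sketch_*, hw_read_pmd() and SKETCH_NUM_PMDS are stand-ins, not kernel symbols):

/* Sketch only: the shadow copies of the PMU registers belong to the
 * monitoring context, not to the thread being monitored. */
#define SKETCH_NUM_PMDS 64

struct sketch_pfm_context {
	unsigned long th_pmcs[SKETCH_NUM_PMDS];	/* saved PMC values */
	unsigned long th_pmds[SKETCH_NUM_PMDS];	/* saved PMD values */
	unsigned long used_pmds;		/* bitmask of PMDs in use */
};

extern unsigned long hw_read_pmd(int i);	/* stand-in for ia64_get_pmd() */

static void sketch_save_pmds(struct sketch_pfm_context *ctx)
{
	int i;

	for (i = 0; i < SKETCH_NUM_PMDS; i++)
		if (ctx->used_pmds & (1UL << i))
			ctx->th_pmds[i] = hw_read_pmd(i);
}

Keeping the shadows in the context is what lets pfm_save_regs(), pfm_lazy_save_regs(), both pfm_load_regs() variants, and dump_pmu_state() drop their struct thread_struct locals in the hunks above.
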
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 9065f0f01ba3..e63b8ca5344a 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -266,6 +266,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
266/* Check for outstanding MCA/INIT records every minute (arbitrary) */ 266/* Check for outstanding MCA/INIT records every minute (arbitrary) */
267#define SALINFO_TIMER_DELAY (60*HZ) 267#define SALINFO_TIMER_DELAY (60*HZ)
268static struct timer_list salinfo_timer; 268static struct timer_list salinfo_timer;
269extern void ia64_mlogbuf_dump(void);
269 270
270static void 271static void
271salinfo_timeout_check(struct salinfo_data *data) 272salinfo_timeout_check(struct salinfo_data *data)
@@ -283,6 +284,7 @@ salinfo_timeout_check(struct salinfo_data *data)
283static void 284static void
284salinfo_timeout (unsigned long arg) 285salinfo_timeout (unsigned long arg)
285{ 286{
287 ia64_mlogbuf_dump();
286 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); 288 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
287 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT); 289 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT);
288 salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; 290 salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
@@ -332,6 +334,8 @@ retry:
332 if (cpu == -1) 334 if (cpu == -1)
333 goto retry; 335 goto retry;
334 336
337 ia64_mlogbuf_dump();
338
335 /* for next read, start checking at next CPU */ 339 /* for next read, start checking at next CPU */
336 data->cpu_check = cpu; 340 data->cpu_check = cpu;
337 if (++data->cpu_check == NR_CPUS) 341 if (++data->cpu_check == NR_CPUS)
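
The salinfo.c hunks hook ia64_mlogbuf_dump() into both the read path and the existing one-minute polling timer. For reference, a minimal sketch of the self-re-arming timer_list pattern that salinfo_timeout() follows; the demo_* names are illustrative, and the real callback also walks the MCA/INIT record state:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_TIMER_DELAY (60*HZ)	/* once a minute, as salinfo does */

static struct timer_list demo_timer;

static void demo_timeout(unsigned long arg)
{
	/* periodic work goes here, e.g. flushing a log buffer */
	demo_timer.expires = jiffies + DEMO_TIMER_DELAY;
	add_timer(&demo_timer);		/* re-arm for the next period */
}

static void demo_timer_start(void)
{
	init_timer(&demo_timer);
	demo_timer.function = demo_timeout;
	demo_timer.data = 0;
	demo_timer.expires = jiffies + DEMO_TIMER_DELAY;
	add_timer(&demo_timer);
}
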
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 7ad0d9cc6db6..84f93c0f2c66 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -509,7 +509,7 @@ show_cpuinfo (struct seq_file *m, void *v)
509 { 1UL << 1, "spontaneous deferral"}, 509 { 1UL << 1, "spontaneous deferral"},
510 { 1UL << 2, "16-byte atomic ops" } 510 { 1UL << 2, "16-byte atomic ops" }
511 }; 511 };
512 char family[32], features[128], *cp, sep; 512 char features[128], *cp, sep;
513 struct cpuinfo_ia64 *c = v; 513 struct cpuinfo_ia64 *c = v;
514 unsigned long mask; 514 unsigned long mask;
515 unsigned long proc_freq; 515 unsigned long proc_freq;
@@ -517,12 +517,6 @@ show_cpuinfo (struct seq_file *m, void *v)
517 517
518 mask = c->features; 518 mask = c->features;
519 519
520 switch (c->family) {
521 case 0x07: memcpy(family, "Itanium", 8); break;
522 case 0x1f: memcpy(family, "Itanium 2", 10); break;
523 default: sprintf(family, "%u", c->family); break;
524 }
525
526 /* build the feature string: */ 520 /* build the feature string: */
527 memcpy(features, " standard", 10); 521 memcpy(features, " standard", 10);
528 cp = features; 522 cp = features;
@@ -553,8 +547,9 @@ show_cpuinfo (struct seq_file *m, void *v)
553 "processor : %d\n" 547 "processor : %d\n"
554 "vendor : %s\n" 548 "vendor : %s\n"
555 "arch : IA-64\n" 549 "arch : IA-64\n"
556 "family : %s\n" 550 "family : %u\n"
557 "model : %u\n" 551 "model : %u\n"
552 "model name : %s\n"
558 "revision : %u\n" 553 "revision : %u\n"
559 "archrev : %u\n" 554 "archrev : %u\n"
560 "features :%s\n" /* don't change this---it _is_ right! */ 555 "features :%s\n" /* don't change this---it _is_ right! */
@@ -563,7 +558,8 @@ show_cpuinfo (struct seq_file *m, void *v)
563 "cpu MHz : %lu.%06lu\n" 558 "cpu MHz : %lu.%06lu\n"
564 "itc MHz : %lu.%06lu\n" 559 "itc MHz : %lu.%06lu\n"
565 "BogoMIPS : %lu.%02lu\n", 560 "BogoMIPS : %lu.%02lu\n",
566 cpunum, c->vendor, family, c->model, c->revision, c->archrev, 561 cpunum, c->vendor, c->family, c->model,
562 c->model_name, c->revision, c->archrev,
567 features, c->ppn, c->number, 563 features, c->ppn, c->number,
568 proc_freq / 1000, proc_freq % 1000, 564 proc_freq / 1000, proc_freq % 1000,
569 c->itc_freq / 1000000, c->itc_freq % 1000000, 565 c->itc_freq / 1000000, c->itc_freq % 1000000,
@@ -611,6 +607,31 @@ struct seq_operations cpuinfo_op = {
611 .show = show_cpuinfo 607 .show = show_cpuinfo
612}; 608};
613 609
610static char brandname[128];
611
612static char * __cpuinit
613get_model_name(__u8 family, __u8 model)
614{
615 char brand[128];
616
617 if (ia64_pal_get_brand_info(brand)) {
618 if (family == 0x7)
619 memcpy(brand, "Merced", 7);
620 else if (family == 0x1f) switch (model) {
621 case 0: memcpy(brand, "McKinley", 9); break;
622 case 1: memcpy(brand, "Madison", 8); break;
623 case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
624 } else
625 memcpy(brand, "Unknown", 8);
626 }
627 if (brandname[0] == '\0')
628 return strcpy(brandname, brand);
629 else if (strcmp(brandname, brand) == 0)
630 return brandname;
631 else
632 return kstrdup(brand, GFP_KERNEL);
633}
634
614static void __cpuinit 635static void __cpuinit
615identify_cpu (struct cpuinfo_ia64 *c) 636identify_cpu (struct cpuinfo_ia64 *c)
616{ 637{
@@ -640,7 +661,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
640 pal_status_t status; 661 pal_status_t status;
641 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ 662 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
642 int i; 663 int i;
643
644 for (i = 0; i < 5; ++i) 664 for (i = 0; i < 5; ++i)
645 cpuid.bits[i] = ia64_get_cpuid(i); 665 cpuid.bits[i] = ia64_get_cpuid(i);
646 666
@@ -663,6 +683,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
663 c->family = cpuid.field.family; 683 c->family = cpuid.field.family;
664 c->archrev = cpuid.field.archrev; 684 c->archrev = cpuid.field.archrev;
665 c->features = cpuid.field.features; 685 c->features = cpuid.field.features;
686 c->model_name = get_model_name(c->family, c->model);
666 687
667 status = ia64_pal_vm_summary(&vm1, &vm2); 688 status = ia64_pal_vm_summary(&vm1, &vm2);
668 if (status == PAL_STATUS_SUCCESS) { 689 if (status == PAL_STATUS_SUCCESS) {
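
The setup.c change replaces the hand-rolled family switch with a PAL brand-info query (ia64_pal_get_brand_info) plus a small fallback table, and caches the returned string so repeated /proc/cpuinfo reads on a homogeneous machine never allocate. The caching idea in isolation, with illustrative names (cache_brand/cached_brand are not the kernel's):

#include <linux/string.h>
#include <linux/slab.h>

static char cached_brand[128];

/* Return a stable pointer for a brand string: the first string seen is
 * kept in a static buffer and reused; only a CPU whose brand differs
 * from the first one (a mixed-CPU system) costs a kstrdup(). */
static char *cache_brand(const char *brand)
{
	if (cached_brand[0] == '\0')
		return strcpy(cached_brand, brand);
	if (strcmp(cached_brand, brand) == 0)
		return cached_brand;
	return kstrdup(brand, GFP_KERNEL);	/* rare mixed-CPU case */
}
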
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6203ed4ec8cf..f7d7f5668144 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -879,3 +879,27 @@ identify_siblings(struct cpuinfo_ia64 *c)
879 c->core_id = info.log1_cid; 879 c->core_id = info.log1_cid;
880 c->thread_id = info.log1_tid; 880 c->thread_id = info.log1_tid;
881} 881}
882
883/*
884 * Returns non-zero if multi-threading is enabled
885 * on at least one physical package. Due to CPU hotplug
886 * and (maxcpus=), not all threads are necessarily enabled
887 * even though the processor supports multi-threading.
888 */
889int is_multithreading_enabled(void)
890{
891 int i, j;
892
893 for_each_present_cpu(i) {
894 for_each_present_cpu(j) {
895 if (j == i)
896 continue;
897 if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
898 if (cpu_data(j)->core_id == cpu_data(i)->core_id)
899 return 1;
900 }
901 }
902 }
903 return 0;
904}
905EXPORT_SYMBOL_GPL(is_multithreading_enabled);
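
is_multithreading_enabled() answers its question by brute force: any two distinct present CPUs that share both socket_id and core_id must be sibling threads of one core. The scan is O(n^2) in present CPUs, which is acceptable for a function meant for slow paths. A hedged usage sketch (the call site is illustrative, not part of this patch):

#include <linux/kernel.h>

extern int is_multithreading_enabled(void);

static void report_smt(void)
{
	if (is_multithreading_enabled())
		printk(KERN_INFO "SMT: at least one core runs two threads\n");
	else
		printk(KERN_INFO "SMT: no sibling threads enabled\n");
}
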
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5b0d5f64a9b1..b3b2e389d6b2 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -184,7 +184,9 @@ SECTIONS
184 *(.data.gate) 184 *(.data.gate)
185 __stop_gate_section = .; 185 __stop_gate_section = .;
186 } 186 }
187 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */ 187 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
188 * kernel data
189 */
188 190
189 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) 191 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
190 { *(.data.read_mostly) } 192 { *(.data.read_mostly) }
@@ -202,7 +204,9 @@ SECTIONS
202 *(.data.percpu) 204 *(.data.percpu)
203 __per_cpu_end = .; 205 __per_cpu_end = .;
204 } 206 }
205 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */ 207 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
208 * into percpu page size
209 */
206 210
207 data : { } :data 211 data : { } :data
208 .data : AT(ADDR(.data) - LOAD_OFFSET) 212 .data : AT(ADDR(.data) - LOAD_OFFSET)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 719d476e71ba..daf977ff2920 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -40,10 +40,11 @@ show_mem (void)
40 int i, total = 0, reserved = 0; 40 int i, total = 0, reserved = 0;
41 int shared = 0, cached = 0; 41 int shared = 0, cached = 0;
42 42
43 printk("Mem-info:\n"); 43 printk(KERN_INFO "Mem-info:\n");
44 show_free_areas(); 44 show_free_areas();
45 45
46 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 46 printk(KERN_INFO "Free swap: %6ldkB\n",
47 nr_swap_pages<<(PAGE_SHIFT-10));
47 i = max_mapnr; 48 i = max_mapnr;
48 for (i = 0; i < max_mapnr; i++) { 49 for (i = 0; i < max_mapnr; i++) {
49 if (!pfn_valid(i)) { 50 if (!pfn_valid(i)) {
@@ -62,12 +63,12 @@ show_mem (void)
62 else if (page_count(mem_map + i)) 63 else if (page_count(mem_map + i))
63 shared += page_count(mem_map + i) - 1; 64 shared += page_count(mem_map + i) - 1;
64 } 65 }
65 printk("%d pages of RAM\n", total); 66 printk(KERN_INFO "%d pages of RAM\n", total);
66 printk("%d reserved pages\n", reserved); 67 printk(KERN_INFO "%d reserved pages\n", reserved);
67 printk("%d pages shared\n", shared); 68 printk(KERN_INFO "%d pages shared\n", shared);
68 printk("%d pages swap cached\n", cached); 69 printk(KERN_INFO "%d pages swap cached\n", cached);
69 printk("%ld pages in page table cache\n", 70 printk(KERN_INFO "%ld pages in page table cache\n",
70 pgtable_quicklist_total_size()); 71 pgtable_quicklist_total_size());
71} 72}
72 73
73/* physical address where the bootmem map is located */ 74/* physical address where the bootmem map is located */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 7bd28079dcc4..d497b6b0f5b2 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -547,15 +547,16 @@ void show_mem(void)
547 unsigned long total_present = 0; 547 unsigned long total_present = 0;
548 pg_data_t *pgdat; 548 pg_data_t *pgdat;
549 549
550 printk("Mem-info:\n"); 550 printk(KERN_INFO "Mem-info:\n");
551 show_free_areas(); 551 show_free_areas();
552 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 552 printk(KERN_INFO "Free swap: %6ldkB\n",
553 nr_swap_pages<<(PAGE_SHIFT-10));
554 printk(KERN_INFO "Node memory in pages:\n");
553 for_each_online_pgdat(pgdat) { 555 for_each_online_pgdat(pgdat) {
554 unsigned long present; 556 unsigned long present;
555 unsigned long flags; 557 unsigned long flags;
556 int shared = 0, cached = 0, reserved = 0; 558 int shared = 0, cached = 0, reserved = 0;
557 559
558 printk("Node ID: %d\n", pgdat->node_id);
559 pgdat_resize_lock(pgdat, &flags); 560 pgdat_resize_lock(pgdat, &flags);
560 present = pgdat->node_present_pages; 561 present = pgdat->node_present_pages;
561 for(i = 0; i < pgdat->node_spanned_pages; i++) { 562 for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -579,18 +580,17 @@ void show_mem(void)
579 total_reserved += reserved; 580 total_reserved += reserved;
580 total_cached += cached; 581 total_cached += cached;
581 total_shared += shared; 582 total_shared += shared;
582 printk("\t%ld pages of RAM\n", present); 583 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
583 printk("\t%d reserved pages\n", reserved); 584 "shrd: %10d, swpd: %10d\n", pgdat->node_id,
584 printk("\t%d pages shared\n", shared); 585 present, reserved, shared, cached);
585 printk("\t%d pages swap cached\n", cached);
586 } 586 }
587 printk("%ld pages of RAM\n", total_present); 587 printk(KERN_INFO "%ld pages of RAM\n", total_present);
588 printk("%d reserved pages\n", total_reserved); 588 printk(KERN_INFO "%d reserved pages\n", total_reserved);
589 printk("%d pages shared\n", total_shared); 589 printk(KERN_INFO "%d pages shared\n", total_shared);
590 printk("%d pages swap cached\n", total_cached); 590 printk(KERN_INFO "%d pages swap cached\n", total_cached);
591 printk("Total of %ld pages in page table cache\n", 591 printk(KERN_INFO "Total of %ld pages in page table cache\n",
592 pgtable_quicklist_total_size()); 592 pgtable_quicklist_total_size());
593 printk("%d free buffer pages\n", nr_free_buffer_pages()); 593 printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
594} 594}
595 595
596/** 596/**
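
With the discontig.c change, the four per-node printk lines collapse into a single KERN_INFO summary per node under a "Node memory in pages:" header. With invented values, the new format string renders like:

Node    0: RAM:      262144, rsvd:     3456, shrd:       1024, swpd:          0
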
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 27dee4584061..7f73ad4408aa 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -277,8 +277,7 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
277 } 277 }
278 278
279 /* temporary buffer used during unaligned transfers */ 279 /* temporary buffer used during unaligned transfers */
280 bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, 280 bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL);
281 GFP_KERNEL | GFP_DMA);
282 if (bteBlock_unaligned == NULL) { 281 if (bteBlock_unaligned == NULL) {
283 return BTEFAIL_NOTAVAIL; 282 return BTEFAIL_NOTAVAIL;
284 } 283 }
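
The bte.c hunk drops GFP_DMA from the bounce-buffer allocation; the buffer is ordinary kernel memory the BTE can reach, so there is no need to draw it from the scarce DMA zone. The surrounding len + 3 * L1_CACHE_BYTES over-allocation is the usual pad-then-align idiom, sketched here with illustrative names (bte.c's own pointer arithmetic differs in detail):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cache.h>

/* Pad the allocation so a cache-line-aligned window, rounded out to
 * whole cache lines at both ends, always fits inside the raw buffer. */
static void *alloc_line_aligned(size_t len, void **raw_out)
{
	void *raw = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL);

	if (raw == NULL)
		return NULL;
	*raw_out = raw;			/* caller passes this to kfree() */
	return (void *)L1_CACHE_ALIGN((unsigned long)raw);
}
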