author     Steven Whitehouse <swhiteho@redhat.com>   2006-09-28 08:29:59 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-09-28 08:29:59 -0400
commit     185a257f2f73bcd89050ad02da5bedbc28fc43fa (patch)
tree       5e32586114534ed3f2165614cba3d578f5d87307 /arch/ia64/kernel
parent     3f1a9aaeffd8d1cbc5ab9776c45cbd66af1c9699 (diff)
parent     a77c64c1a641950626181b4857abb701d8f38ccc (diff)
Merge branch 'master' into gfs2
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile       |   5
-rw-r--r--  arch/ia64/kernel/acpi.c         |  13
-rw-r--r--  arch/ia64/kernel/entry.S        |   4
-rw-r--r--  arch/ia64/kernel/esi.c          | 205
-rw-r--r--  arch/ia64/kernel/esi_stub.S     |  96
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c   |   4
-rw-r--r--  arch/ia64/kernel/kprobes.c      |  61
-rw-r--r--  arch/ia64/kernel/mca.c          | 234
-rw-r--r--  arch/ia64/kernel/mca_asm.S      |   9
-rw-r--r--  arch/ia64/kernel/mca_drv.c      |  54
-rw-r--r--  arch/ia64/kernel/mca_drv.h      |   4
-rw-r--r--  arch/ia64/kernel/numa.c         |  34
-rw-r--r--  arch/ia64/kernel/perfmon.c      | 114
-rw-r--r--  arch/ia64/kernel/salinfo.c      |   4
-rw-r--r--  arch/ia64/kernel/setup.c        |  41
-rw-r--r--  arch/ia64/kernel/smpboot.c      |  24
-rw-r--r--  arch/ia64/kernel/topology.c     |   4
-rw-r--r--  arch/ia64/kernel/uncached.c     |   2
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  |   8
19 files changed, 757 insertions, 163 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index ad8215a3c586..31497496eb4b 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -32,6 +32,11 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
32obj-$(CONFIG_AUDIT) += audit.o 32obj-$(CONFIG_AUDIT) += audit.o
33mca_recovery-y += mca_drv.o mca_drv_asm.o 33mca_recovery-y += mca_drv.o mca_drv_asm.o
34 34
35obj-$(CONFIG_IA64_ESI) += esi.o
36ifneq ($(CONFIG_IA64_ESI),)
37obj-y += esi_stub.o # must be in kernel proper
38endif
39
35# The gate DSO image is built using a special linker script. 40# The gate DSO image is built using a special linker script.
36targets += gate.so gate-syms.o 41targets += gate.so gate-syms.o
37 42
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 0176556aeecc..32c3abededc6 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -771,16 +771,19 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
771{ 771{
772#ifdef CONFIG_ACPI_NUMA 772#ifdef CONFIG_ACPI_NUMA
773 int pxm_id; 773 int pxm_id;
774 int nid;
774 775
775 pxm_id = acpi_get_pxm(handle); 776 pxm_id = acpi_get_pxm(handle);
776
777 /* 777 /*
778 * Assuming that the container driver would have set the proximity 778 * We don't have cpu-only-node hotadd. But if the system equips
779 * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag 779 * SRAT table, pxm is already found and node is ready.
780 * So, just pxm_to_nid(pxm) is OK.
781 * This code here is for the system which doesn't have full SRAT
782 * table for possible cpus.
780 */ 783 */
781 node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_node(pxm_id); 784 nid = acpi_map_pxm_to_node(pxm_id);
782
783 node_cpuid[cpu].phys_id = physid; 785 node_cpuid[cpu].phys_id = physid;
786 node_cpuid[cpu].nid = nid;
784#endif 787#endif
785 return (0); 788 return (0);
786} 789}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index fef06571be99..12701cf32d99 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1605,8 +1605,8 @@ sys_call_table:
1605 data8 sys_ni_syscall // 1295 reserved for ppoll 1605 data8 sys_ni_syscall // 1295 reserved for ppoll
1606 data8 sys_unshare 1606 data8 sys_unshare
1607 data8 sys_splice 1607 data8 sys_splice
1608 data8 sys_ni_syscall // reserved for set_robust_list 1608 data8 sys_set_robust_list
1609 data8 sys_ni_syscall // reserved for get_robust_list 1609 data8 sys_get_robust_list
1610 data8 sys_sync_file_range // 1300 1610 data8 sys_sync_file_range // 1300
1611 data8 sys_tee 1611 data8 sys_tee
1612 data8 sys_vmsplice 1612 data8 sys_vmsplice
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
new file mode 100644
index 000000000000..ebf4e988e78c
--- /dev/null
+++ b/arch/ia64/kernel/esi.c
@@ -0,0 +1,205 @@
1/*
2 * Extensible SAL Interface (ESI) support routines.
3 *
4 * Copyright (C) 2006 Hewlett-Packard Co
5 * Alex Williamson <alex.williamson@hp.com>
6 */
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/string.h>
11
12#include <asm/esi.h>
13#include <asm/sal.h>
14
15MODULE_AUTHOR("Alex Williamson <alex.williamson@hp.com>");
16MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support");
17MODULE_LICENSE("GPL");
18
19#define MODULE_NAME "esi"
20
21#define ESI_TABLE_GUID \
22 EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \
23 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4)
24
25enum esi_systab_entry_type {
26 ESI_DESC_ENTRY_POINT = 0
27};
28
29/*
30 * Entry type: Size:
31 * 0 48
32 */
33#define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)]
34
35typedef struct ia64_esi_desc_entry_point {
36 u8 type;
37 u8 reserved1[15];
38 u64 esi_proc;
39 u64 gp;
40 efi_guid_t guid;
41} ia64_esi_desc_entry_point_t;
42
43struct pdesc {
44 void *addr;
45 void *gp;
46};
47
48static struct ia64_sal_systab *esi_systab;
49
50static int __init esi_init (void)
51{
52 efi_config_table_t *config_tables;
53 struct ia64_sal_systab *systab;
54 unsigned long esi = 0;
55 char *p;
56 int i;
57
58 config_tables = __va(efi.systab->tables);
59
60 for (i = 0; i < (int) efi.systab->nr_tables; ++i) {
61 if (efi_guidcmp(config_tables[i].guid, ESI_TABLE_GUID) == 0) {
62 esi = config_tables[i].table;
63 break;
64 }
65 }
66
67 if (!esi)
68 return -ENODEV;;
69
70 systab = __va(esi);
71
72 if (strncmp(systab->signature, "ESIT", 4) != 0) {
73 printk(KERN_ERR "bad signature in ESI system table!");
74 return -ENODEV;
75 }
76
77 p = (char *) (systab + 1);
78 for (i = 0; i < systab->entry_count; i++) {
79 /*
80 * The first byte of each entry type contains the type
81 * descriptor.
82 */
83 switch (*p) {
84 case ESI_DESC_ENTRY_POINT:
85 break;
86 default:
87 printk(KERN_WARNING "Unkown table type %d found in "
88 "ESI table, ignoring rest of table\n", *p);
89 return -ENODEV;
90 }
91
92 p += ESI_DESC_SIZE(*p);
93 }
94
95 esi_systab = systab;
96 return 0;
97}
98
99
100int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp,
101 enum esi_proc_type proc_type, u64 func,
102 u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6,
103 u64 arg7)
104{
105 struct ia64_fpreg fr[6];
106 unsigned long flags = 0;
107 int i;
108 char *p;
109
110 if (!esi_systab)
111 return -1;
112
113 p = (char *) (esi_systab + 1);
114 for (i = 0; i < esi_systab->entry_count; i++) {
115 if (*p == ESI_DESC_ENTRY_POINT) {
116 ia64_esi_desc_entry_point_t *esi = (void *)p;
117 if (!efi_guidcmp(guid, esi->guid)) {
118 ia64_sal_handler esi_proc;
119 struct pdesc pdesc;
120
121 pdesc.addr = __va(esi->esi_proc);
122 pdesc.gp = __va(esi->gp);
123
124 esi_proc = (ia64_sal_handler) &pdesc;
125
126 ia64_save_scratch_fpregs(fr);
127 if (proc_type == ESI_PROC_SERIALIZED)
128 spin_lock_irqsave(&sal_lock, flags);
129 else if (proc_type == ESI_PROC_MP_SAFE)
130 local_irq_save(flags);
131 else
132 preempt_disable();
133 *isrvp = (*esi_proc)(func, arg1, arg2, arg3,
134 arg4, arg5, arg6, arg7);
135 if (proc_type == ESI_PROC_SERIALIZED)
136 spin_unlock_irqrestore(&sal_lock,
137 flags);
138 else if (proc_type == ESI_PROC_MP_SAFE)
139 local_irq_restore(flags);
140 else
141 preempt_enable();
142 ia64_load_scratch_fpregs(fr);
143 return 0;
144 }
145 }
146 p += ESI_DESC_SIZE(*p);
147 }
148 return -1;
149}
150EXPORT_SYMBOL_GPL(ia64_esi_call);
151
152int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp,
153 u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
154 u64 arg5, u64 arg6, u64 arg7)
155{
156 struct ia64_fpreg fr[6];
157 unsigned long flags;
158 u64 esi_params[8];
159 char *p;
160 int i;
161
162 if (!esi_systab)
163 return -1;
164
165 p = (char *) (esi_systab + 1);
166 for (i = 0; i < esi_systab->entry_count; i++) {
167 if (*p == ESI_DESC_ENTRY_POINT) {
168 ia64_esi_desc_entry_point_t *esi = (void *)p;
169 if (!efi_guidcmp(guid, esi->guid)) {
170 ia64_sal_handler esi_proc;
171 struct pdesc pdesc;
172
173 pdesc.addr = (void *)esi->esi_proc;
174 pdesc.gp = (void *)esi->gp;
175
176 esi_proc = (ia64_sal_handler) &pdesc;
177
178 esi_params[0] = func;
179 esi_params[1] = arg1;
180 esi_params[2] = arg2;
181 esi_params[3] = arg3;
182 esi_params[4] = arg4;
183 esi_params[5] = arg5;
184 esi_params[6] = arg6;
185 esi_params[7] = arg7;
186 ia64_save_scratch_fpregs(fr);
187 spin_lock_irqsave(&sal_lock, flags);
188 *isrvp = esi_call_phys(esi_proc, esi_params);
189 spin_unlock_irqrestore(&sal_lock, flags);
190 ia64_load_scratch_fpregs(fr);
191 return 0;
192 }
193 }
194 p += ESI_DESC_SIZE(*p);
195 }
196 return -1;
197}
198EXPORT_SYMBOL_GPL(ia64_esi_call_phys);
199
200static void __exit esi_exit (void)
201{
202}
203
204module_init(esi_init);
205module_exit(esi_exit); /* makes module removable... */
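
Editor's illustration, not part of the commit: a minimal sketch of how a driver might use the ia64_esi_call() interface added above. The GUID and the ESI function number are hypothetical placeholders; real values come from the platform's ESI documentation. Unused arguments are passed as zero, and a negative return means no matching entry point was found in the ESI system table.

/* Hypothetical caller of ia64_esi_call() -- illustration only. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/esi.h>
#include <asm/sal.h>

#define EXAMPLE_ESI_GUID /* made-up GUID, for illustration */ \
	EFI_GUID(0x12345678, 0x1234, 0x4321, 0x00, \
		 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77)

static int __init esi_example_init(void)
{
	struct ia64_sal_retval isrv;

	/* func 0 is a placeholder; the trailing arguments are unused here */
	if (ia64_esi_call(EXAMPLE_ESI_GUID, &isrv, ESI_PROC_MP_SAFE,
			  0, 0, 0, 0, 0, 0, 0, 0) < 0)
		return -ENODEV;		/* no matching ESI entry point */

	printk(KERN_INFO "esi_example: ESI call status %ld\n", isrv.status);
	return 0;
}
module_init(esi_example_init);
MODULE_LICENSE("GPL");
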
diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S
new file mode 100644
index 000000000000..6b3d6c1f99b6
--- /dev/null
+++ b/arch/ia64/kernel/esi_stub.S
@@ -0,0 +1,96 @@
1/*
2 * ESI call stub.
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Alex Williamson <alex.williamson@hp.com>
6 *
7 * Based on EFI call stub by David Mosberger. The stub is virtually
8 * identical to the one for EFI phys-mode calls, except that ESI
9 * calls may have up to 8 arguments, so they get passed to this routine
10 * through memory.
11 *
12 * This stub allows us to make ESI calls in physical mode with interrupts
13 * turned off. ESI calls may not support calling from virtual mode.
14 *
15 * Google for "Extensible SAL specification" for a document describing the
16 * ESI standard.
17 */
18
19/*
20 * PSR settings as per SAL spec (Chapter 8 in the "IA-64 System
21 * Abstraction Layer Specification", revision 2.6e). Note that
22 * psr.dfl and psr.dfh MUST be cleared, despite what this manual says.
23 * Otherwise, SAL dies whenever it's trying to do an IA-32 BIOS call
24 * (the br.ia instruction fails unless psr.dfl and psr.dfh are
25 * cleared). Fortunately, SAL promises not to touch the floating
26 * point regs, so at least we don't have to save f2-f127.
27 */
28#define PSR_BITS_TO_CLEAR \
29 (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
30 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
31 IA64_PSR_DFL | IA64_PSR_DFH)
32
33#define PSR_BITS_TO_SET \
34 (IA64_PSR_BN)
35
36#include <asm/processor.h>
37#include <asm/asmmacro.h>
38
39/*
40 * Inputs:
41 * in0 = address of function descriptor of ESI routine to call
42 * in1 = address of array of ESI parameters
43 *
44 * Outputs:
45 * r8 = result returned by called function
46 */
47GLOBAL_ENTRY(esi_call_phys)
48 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
49 alloc loc1=ar.pfs,2,7,8,0
50 ld8 r2=[in0],8 // load ESI function's entry point
51 mov loc0=rp
52 .body
53 ;;
54 ld8 out0=[in1],8 // ESI params loaded from array
55 ;; // passing all as inputs doesn't work
56 ld8 out1=[in1],8
57 ;;
58 ld8 out2=[in1],8
59 ;;
60 ld8 out3=[in1],8
61 ;;
62 ld8 out4=[in1],8
63 ;;
64 ld8 out5=[in1],8
65 ;;
66 ld8 out6=[in1],8
67 ;;
68 ld8 out7=[in1]
69 mov loc2=gp // save global pointer
70 mov loc4=ar.rsc // save RSE configuration
71 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
72 ;;
73 ld8 gp=[in0] // load ESI function's global pointer
74 movl r16=PSR_BITS_TO_CLEAR
75 mov loc3=psr // save processor status word
76 movl r17=PSR_BITS_TO_SET
77 ;;
78 or loc3=loc3,r17
79 mov b6=r2
80 ;;
81 andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
82 br.call.sptk.many rp=ia64_switch_mode_phys
83.ret0: mov loc5=r19 // old ar.bsp
84 mov loc6=r20 // old sp
85 br.call.sptk.many rp=b6 // call the ESI function
86.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
87 mov r16=loc3 // save virtual mode psr
88 mov r19=loc5 // save virtual mode bspstore
89 mov r20=loc6 // save virtual mode sp
90 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
91.ret2: mov ar.rsc=loc4 // restore RSE configuration
92 mov ar.pfs=loc1
93 mov rp=loc0
94 mov gp=loc2
95 br.ret.sptk.many rp
96END(esi_call_phys)
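
Reading aid, not part of the commit: the register interface documented at the top of the stub (in0 = address of the function descriptor, in1 = address of the parameter array, result in r8) corresponds to a C-level declaration roughly like the one below. This is an assumed sketch; the real prototype lives in the ia64 ESI/SAL headers and may differ in detail.

/* Sketch of the C view of esi_call_phys (assumed, not verbatim):
 * fp points at an {entry point, gp} function descriptor, params at an
 * array of eight u64 arguments; the SAL-style result comes back in r8. */
extern struct ia64_sal_retval esi_call_phys(void *fp, u64 *params);
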
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 3ead20fb6f4b..879c1817bd1c 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -105,5 +105,9 @@ EXPORT_SYMBOL(ia64_spinlock_contention);
105# endif 105# endif
106#endif 106#endif
107 107
108#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
109extern void esi_call_phys (void);
110EXPORT_SYMBOL_GPL(esi_call_phys);
111#endif
108extern char ia64_ivt[]; 112extern char ia64_ivt[];
109EXPORT_SYMBOL(ia64_ivt); 113EXPORT_SYMBOL(ia64_ivt);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 781960f80b6f..169ec3a7156c 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -136,10 +136,8 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
136static int __kprobes unsupported_inst(uint template, uint slot, 136static int __kprobes unsupported_inst(uint template, uint slot,
137 uint major_opcode, 137 uint major_opcode,
138 unsigned long kprobe_inst, 138 unsigned long kprobe_inst,
139 struct kprobe *p) 139 unsigned long addr)
140{ 140{
141 unsigned long addr = (unsigned long)p->addr;
142
143 if (bundle_encoding[template][slot] == I) { 141 if (bundle_encoding[template][slot] == I) {
144 switch (major_opcode) { 142 switch (major_opcode) {
145 case 0x0: //I_UNIT_MISC_OPCODE: 143 case 0x0: //I_UNIT_MISC_OPCODE:
@@ -217,7 +215,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
217 struct kprobe *p) 215 struct kprobe *p)
218{ 216{
219 unsigned long break_inst = BREAK_INST; 217 unsigned long break_inst = BREAK_INST;
220 bundle_t *bundle = &p->ainsn.insn.bundle; 218 bundle_t *bundle = &p->opcode.bundle;
221 219
222 /* 220 /*
223 * Copy the original kprobe_inst qualifying predicate(qp) 221 * Copy the original kprobe_inst qualifying predicate(qp)
@@ -423,11 +421,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
423 unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); 421 unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
424 unsigned long kprobe_inst=0; 422 unsigned long kprobe_inst=0;
425 unsigned int slot = addr & 0xf, template, major_opcode = 0; 423 unsigned int slot = addr & 0xf, template, major_opcode = 0;
426 bundle_t *bundle = &p->ainsn.insn.bundle; 424 bundle_t *bundle;
427
428 memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
429 memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
430 425
426 bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
431 template = bundle->quad0.template; 427 template = bundle->quad0.template;
432 428
433 if(valid_kprobe_addr(template, slot, addr)) 429 if(valid_kprobe_addr(template, slot, addr))
@@ -440,20 +436,19 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
440 /* Get kprobe_inst and major_opcode from the bundle */ 436 /* Get kprobe_inst and major_opcode from the bundle */
441 get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); 437 get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
442 438
443 if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p)) 439 if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
444 return -EINVAL; 440 return -EINVAL;
445 441
446 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
447 442
448 return 0; 443 p->ainsn.insn = get_insn_slot();
449} 444 if (!p->ainsn.insn)
445 return -ENOMEM;
446 memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
447 memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
450 448
451void __kprobes flush_insn_slot(struct kprobe *p) 449 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
452{
453 unsigned long arm_addr;
454 450
455 arm_addr = ((unsigned long)&p->opcode.bundle) & ~0xFULL; 451 return 0;
456 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
457} 452}
458 453
459void __kprobes arch_arm_kprobe(struct kprobe *p) 454void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -461,9 +456,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
461 unsigned long addr = (unsigned long)p->addr; 456 unsigned long addr = (unsigned long)p->addr;
462 unsigned long arm_addr = addr & ~0xFULL; 457 unsigned long arm_addr = addr & ~0xFULL;
463 458
464 flush_insn_slot(p); 459 flush_icache_range((unsigned long)p->ainsn.insn,
465 memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t)); 460 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
466 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); 461 memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
462 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
467} 463}
468 464
469void __kprobes arch_disarm_kprobe(struct kprobe *p) 465void __kprobes arch_disarm_kprobe(struct kprobe *p)
@@ -471,11 +467,18 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
471 unsigned long addr = (unsigned long)p->addr; 467 unsigned long addr = (unsigned long)p->addr;
472 unsigned long arm_addr = addr & ~0xFULL; 468 unsigned long arm_addr = addr & ~0xFULL;
473 469
474 /* p->opcode contains the original unaltered bundle */ 470 /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
475 memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t)); 471 memcpy((char *) arm_addr, (char *) p->ainsn.insn,
476 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); 472 sizeof(kprobe_opcode_t));
473 flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
477} 474}
478 475
476void __kprobes arch_remove_kprobe(struct kprobe *p)
477{
478 mutex_lock(&kprobe_mutex);
479 free_insn_slot(p->ainsn.insn);
480 mutex_unlock(&kprobe_mutex);
481}
479/* 482/*
480 * We are resuming execution after a single step fault, so the pt_regs 483 * We are resuming execution after a single step fault, so the pt_regs
481 * structure reflects the register state after we executed the instruction 484 * structure reflects the register state after we executed the instruction
@@ -486,12 +489,12 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
486 */ 489 */
487static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 490static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
488{ 491{
489 unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; 492 unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
490 unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; 493 unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
491 unsigned long template; 494 unsigned long template;
492 int slot = ((unsigned long)p->addr & 0xf); 495 int slot = ((unsigned long)p->addr & 0xf);
493 496
494 template = p->opcode.bundle.quad0.template; 497 template = p->ainsn.insn->bundle.quad0.template;
495 498
496 if (slot == 1 && bundle_encoding[template][1] == L) 499 if (slot == 1 && bundle_encoding[template][1] == L)
497 slot = 2; 500 slot = 2;
@@ -553,7 +556,7 @@ turn_ss_off:
553 556
554static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) 557static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
555{ 558{
556 unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; 559 unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
557 unsigned long slot = (unsigned long)p->addr & 0xf; 560 unsigned long slot = (unsigned long)p->addr & 0xf;
558 561
559 /* single step inline if break instruction */ 562 /* single step inline if break instruction */
@@ -768,6 +771,12 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
768 */ 771 */
769 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 772 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
770 return 1; 773 return 1;
774 /*
775 * In case the user-specified fault handler returned
776 * zero, try to fix up.
777 */
778 if (ia64_done_with_exception(regs))
779 return 1;
771 780
772 /* 781 /*
773 * Let ia64_do_page_fault() fix it. 782 * Let ia64_do_page_fault() fix it.
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 2fbe4536fe18..bfbd8986153b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -54,6 +54,9 @@
54 * 54 *
55 * 2005-10-07 Keith Owens <kaos@sgi.com> 55 * 2005-10-07 Keith Owens <kaos@sgi.com>
56 * Add notify_die() hooks. 56 * Add notify_die() hooks.
57 *
58 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
59 * Add printing support for MCA/INIT.
57 */ 60 */
58#include <linux/types.h> 61#include <linux/types.h>
59#include <linux/init.h> 62#include <linux/init.h>
@@ -136,11 +139,175 @@ extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
136 139
137static int mca_init __initdata; 140static int mca_init __initdata;
138 141
142/*
143 * limited & delayed printing support for MCA/INIT handler
144 */
145
146#define mprintk(fmt...) ia64_mca_printk(fmt)
147
148#define MLOGBUF_SIZE (512+256*NR_CPUS)
149#define MLOGBUF_MSGMAX 256
150static char mlogbuf[MLOGBUF_SIZE];
151static DEFINE_SPINLOCK(mlogbuf_wlock); /* mca context only */
152static DEFINE_SPINLOCK(mlogbuf_rlock); /* normal context only */
153static unsigned long mlogbuf_start;
154static unsigned long mlogbuf_end;
155static unsigned int mlogbuf_finished = 0;
156static unsigned long mlogbuf_timestamp = 0;
157
158static int loglevel_save = -1;
159#define BREAK_LOGLEVEL(__console_loglevel) \
160 oops_in_progress = 1; \
161 if (loglevel_save < 0) \
162 loglevel_save = __console_loglevel; \
163 __console_loglevel = 15;
164
165#define RESTORE_LOGLEVEL(__console_loglevel) \
166 if (loglevel_save >= 0) { \
167 __console_loglevel = loglevel_save; \
168 loglevel_save = -1; \
169 } \
170 mlogbuf_finished = 0; \
171 oops_in_progress = 0;
172
173/*
174 * Push messages into buffer, print them later if not urgent.
175 */
176void ia64_mca_printk(const char *fmt, ...)
177{
178 va_list args;
179 int printed_len;
180 char temp_buf[MLOGBUF_MSGMAX];
181 char *p;
182
183 va_start(args, fmt);
184 printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
185 va_end(args);
186
187 /* Copy the output into mlogbuf */
188 if (oops_in_progress) {
189 /* mlogbuf was abandoned, use printk directly instead. */
190 printk(temp_buf);
191 } else {
192 spin_lock(&mlogbuf_wlock);
193 for (p = temp_buf; *p; p++) {
194 unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
195 if (next != mlogbuf_start) {
196 mlogbuf[mlogbuf_end] = *p;
197 mlogbuf_end = next;
198 } else {
199 /* buffer full */
200 break;
201 }
202 }
203 mlogbuf[mlogbuf_end] = '\0';
204 spin_unlock(&mlogbuf_wlock);
205 }
206}
207EXPORT_SYMBOL(ia64_mca_printk);
208
209/*
210 * Print buffered messages.
211 * NOTE: call this after returning normal context. (ex. from salinfod)
212 */
213void ia64_mlogbuf_dump(void)
214{
215 char temp_buf[MLOGBUF_MSGMAX];
216 char *p;
217 unsigned long index;
218 unsigned long flags;
219 unsigned int printed_len;
220
221 /* Get output from mlogbuf */
222 while (mlogbuf_start != mlogbuf_end) {
223 temp_buf[0] = '\0';
224 p = temp_buf;
225 printed_len = 0;
226
227 spin_lock_irqsave(&mlogbuf_rlock, flags);
228
229 index = mlogbuf_start;
230 while (index != mlogbuf_end) {
231 *p = mlogbuf[index];
232 index = (index + 1) % MLOGBUF_SIZE;
233 if (!*p)
234 break;
235 p++;
236 if (++printed_len >= MLOGBUF_MSGMAX - 1)
237 break;
238 }
239 *p = '\0';
240 if (temp_buf[0])
241 printk(temp_buf);
242 mlogbuf_start = index;
243
244 mlogbuf_timestamp = 0;
245 spin_unlock_irqrestore(&mlogbuf_rlock, flags);
246 }
247}
248EXPORT_SYMBOL(ia64_mlogbuf_dump);
249
250/*
251 * Call this if system is going to down or if immediate flushing messages to
252 * console is required. (ex. recovery was failed, crash dump is going to be
253 * invoked, long-wait rendezvous etc.)
254 * NOTE: this should be called from monarch.
255 */
256static void ia64_mlogbuf_finish(int wait)
257{
258 BREAK_LOGLEVEL(console_loglevel);
259
260 spin_lock_init(&mlogbuf_rlock);
261 ia64_mlogbuf_dump();
262 printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
263 "MCA/INIT might be dodgy or fail.\n");
264
265 if (!wait)
266 return;
267
268 /* wait for console */
269 printk("Delaying for 5 seconds...\n");
270 udelay(5*1000000);
271
272 mlogbuf_finished = 1;
273}
274EXPORT_SYMBOL(ia64_mlogbuf_finish);
275
276/*
277 * Print buffered messages from INIT context.
278 */
279static void ia64_mlogbuf_dump_from_init(void)
280{
281 if (mlogbuf_finished)
282 return;
283
284 if (mlogbuf_timestamp && (mlogbuf_timestamp + 30*HZ > jiffies)) {
285 printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
286 " and the system seems to be messed up.\n");
287 ia64_mlogbuf_finish(0);
288 return;
289 }
290
291 if (!spin_trylock(&mlogbuf_rlock)) {
292 printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
293 "Generated messages other than stack dump will be "
294 "buffered to mlogbuf and will be printed later.\n");
295 printk(KERN_ERR "INIT: If messages would not printed after "
296 "this INIT, wait 30sec and assert INIT again.\n");
297 if (!mlogbuf_timestamp)
298 mlogbuf_timestamp = jiffies;
299 return;
300 }
301 spin_unlock(&mlogbuf_rlock);
302 ia64_mlogbuf_dump();
303}
139 304
140static void inline 305static void inline
141ia64_mca_spin(const char *func) 306ia64_mca_spin(const char *func)
142{ 307{
143 printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func); 308 if (monarch_cpu == smp_processor_id())
309 ia64_mlogbuf_finish(0);
310 mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
144 while (1) 311 while (1)
145 cpu_relax(); 312 cpu_relax();
146} 313}
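
Editor's illustration, not part of the commit: the comment on ia64_mlogbuf_dump() above says it should be called after returning to normal context (salinfod is the example it gives). A hypothetical normal-context consumer would simply drain the buffer before handling the record; the function name below is made up.

/* Hypothetical normal-context consumer of the MCA/INIT message buffer. */
static void example_salinfo_work(void)
{
	/* Safe here: we are no longer in MCA/INIT context. */
	ia64_mlogbuf_dump();

	/* ... go on to read and decode the SAL error record ... */
}
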
@@ -344,9 +511,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
344 /* SAL spec states this should run w/ interrupts enabled */ 511 /* SAL spec states this should run w/ interrupts enabled */
345 local_irq_enable(); 512 local_irq_enable();
346 513
347 /* Get the CPE error record and log it */
348 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
349
350 spin_lock(&cpe_history_lock); 514 spin_lock(&cpe_history_lock);
351 if (!cpe_poll_enabled && cpe_vector >= 0) { 515 if (!cpe_poll_enabled && cpe_vector >= 0) {
352 516
@@ -375,7 +539,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
375 mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL); 539 mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
376 540
377 /* lock already released, get out now */ 541 /* lock already released, get out now */
378 return IRQ_HANDLED; 542 goto out;
379 } else { 543 } else {
380 cpe_history[index++] = now; 544 cpe_history[index++] = now;
381 if (index == CPE_HISTORY_LENGTH) 545 if (index == CPE_HISTORY_LENGTH)
@@ -383,6 +547,10 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
383 } 547 }
384 } 548 }
385 spin_unlock(&cpe_history_lock); 549 spin_unlock(&cpe_history_lock);
550out:
551 /* Get the CPE error record and log it */
552 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
553
386 return IRQ_HANDLED; 554 return IRQ_HANDLED;
387} 555}
388 556
@@ -988,18 +1156,22 @@ ia64_wait_for_slaves(int monarch, const char *type)
988 } 1156 }
989 if (!missing) 1157 if (!missing)
990 goto all_in; 1158 goto all_in;
991 printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type); 1159 /*
1160 * Maybe slave(s) dead. Print buffered messages immediately.
1161 */
1162 ia64_mlogbuf_finish(0);
1163 mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
992 for_each_online_cpu(c) { 1164 for_each_online_cpu(c) {
993 if (c == monarch) 1165 if (c == monarch)
994 continue; 1166 continue;
995 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) 1167 if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
996 printk(" %d", c); 1168 mprintk(" %d", c);
997 } 1169 }
998 printk("\n"); 1170 mprintk("\n");
999 return; 1171 return;
1000 1172
1001all_in: 1173all_in:
1002 printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type); 1174 mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
1003 return; 1175 return;
1004} 1176}
1005 1177
@@ -1027,10 +1199,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1027 struct ia64_mca_notify_die nd = 1199 struct ia64_mca_notify_die nd =
1028 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1200 { .sos = sos, .monarch_cpu = &monarch_cpu };
1029 1201
1030 oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ 1202 mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
1031 console_loglevel = 15; /* make sure printks make it to console */ 1203 "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
1032 printk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d monarch=%ld\n",
1033 sos->proc_state_param, cpu, sos->monarch);
1034 1204
1035 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); 1205 previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
1036 monarch_cpu = cpu; 1206 monarch_cpu = cpu;
@@ -1066,6 +1236,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1066 rh->severity = sal_log_severity_corrected; 1236 rh->severity = sal_log_severity_corrected;
1067 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); 1237 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
1068 sos->os_status = IA64_MCA_CORRECTED; 1238 sos->os_status = IA64_MCA_CORRECTED;
1239 } else {
1240 /* Dump buffered message to console */
1241 ia64_mlogbuf_finish(1);
1069 } 1242 }
1070 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) 1243 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
1071 == NOTIFY_STOP) 1244 == NOTIFY_STOP)
@@ -1106,9 +1279,6 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1106 /* SAL spec states this should run w/ interrupts enabled */ 1279 /* SAL spec states this should run w/ interrupts enabled */
1107 local_irq_enable(); 1280 local_irq_enable();
1108 1281
1109 /* Get the CMC error record and log it */
1110 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
1111
1112 spin_lock(&cmc_history_lock); 1282 spin_lock(&cmc_history_lock);
1113 if (!cmc_polling_enabled) { 1283 if (!cmc_polling_enabled) {
1114 int i, count = 1; /* we know 1 happened now */ 1284 int i, count = 1; /* we know 1 happened now */
@@ -1141,7 +1311,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1141 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); 1311 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
1142 1312
1143 /* lock already released, get out now */ 1313 /* lock already released, get out now */
1144 return IRQ_HANDLED; 1314 goto out;
1145 } else { 1315 } else {
1146 cmc_history[index++] = now; 1316 cmc_history[index++] = now;
1147 if (index == CMC_HISTORY_LENGTH) 1317 if (index == CMC_HISTORY_LENGTH)
@@ -1149,6 +1319,10 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1149 } 1319 }
1150 } 1320 }
1151 spin_unlock(&cmc_history_lock); 1321 spin_unlock(&cmc_history_lock);
1322out:
1323 /* Get the CMC error record and log it */
1324 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
1325
1152 return IRQ_HANDLED; 1326 return IRQ_HANDLED;
1153} 1327}
1154 1328
@@ -1305,6 +1479,15 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
1305 struct task_struct *g, *t; 1479 struct task_struct *g, *t;
1306 if (val != DIE_INIT_MONARCH_PROCESS) 1480 if (val != DIE_INIT_MONARCH_PROCESS)
1307 return NOTIFY_DONE; 1481 return NOTIFY_DONE;
1482
1483 /*
1484 * FIXME: mlogbuf will brim over with INIT stack dumps.
1485 * To enable show_stack from INIT, we use oops_in_progress which should
1486 * be used in real oops. This would cause something wrong after INIT.
1487 */
1488 BREAK_LOGLEVEL(console_loglevel);
1489 ia64_mlogbuf_dump_from_init();
1490
1308 printk(KERN_ERR "Processes interrupted by INIT -"); 1491 printk(KERN_ERR "Processes interrupted by INIT -");
1309 for_each_online_cpu(c) { 1492 for_each_online_cpu(c) {
1310 struct ia64_sal_os_state *s; 1493 struct ia64_sal_os_state *s;
@@ -1326,6 +1509,8 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
1326 } while_each_thread (g, t); 1509 } while_each_thread (g, t);
1327 read_unlock(&tasklist_lock); 1510 read_unlock(&tasklist_lock);
1328 } 1511 }
1512 /* FIXME: This will not restore zapped printk locks. */
1513 RESTORE_LOGLEVEL(console_loglevel);
1329 return NOTIFY_DONE; 1514 return NOTIFY_DONE;
1330} 1515}
1331 1516
@@ -1357,12 +1542,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1357 struct ia64_mca_notify_die nd = 1542 struct ia64_mca_notify_die nd =
1358 { .sos = sos, .monarch_cpu = &monarch_cpu }; 1543 { .sos = sos, .monarch_cpu = &monarch_cpu };
1359 1544
1360 oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
1361 console_loglevel = 15; /* make sure printks make it to console */
1362
1363 (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0); 1545 (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
1364 1546
1365 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", 1547 mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
1366 sos->proc_state_param, cpu, sos->monarch); 1548 sos->proc_state_param, cpu, sos->monarch);
1367 salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); 1549 salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
1368 1550
@@ -1375,7 +1557,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1375 * fix their proms and get their customers updated. 1557 * fix their proms and get their customers updated.
1376 */ 1558 */
1377 if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { 1559 if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
1378 printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", 1560 mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
1379 __FUNCTION__, cpu); 1561 __FUNCTION__, cpu);
1380 atomic_dec(&slaves); 1562 atomic_dec(&slaves);
1381 sos->monarch = 1; 1563 sos->monarch = 1;
@@ -1387,7 +1569,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1387 * fix their proms and get their customers updated. 1569 * fix their proms and get their customers updated.
1388 */ 1570 */
1389 if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { 1571 if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
1390 printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", 1572 mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
1391 __FUNCTION__, cpu); 1573 __FUNCTION__, cpu);
1392 atomic_dec(&monarchs); 1574 atomic_dec(&monarchs);
1393 sos->monarch = 0; 1575 sos->monarch = 0;
@@ -1408,7 +1590,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1408 if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0) 1590 if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
1409 == NOTIFY_STOP) 1591 == NOTIFY_STOP)
1410 ia64_mca_spin(__FUNCTION__); 1592 ia64_mca_spin(__FUNCTION__);
1411 printk("Slave on cpu %d returning to normal service.\n", cpu); 1593 mprintk("Slave on cpu %d returning to normal service.\n", cpu);
1412 set_curr_task(cpu, previous_current); 1594 set_curr_task(cpu, previous_current);
1413 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1595 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1414 atomic_dec(&slaves); 1596 atomic_dec(&slaves);
@@ -1426,7 +1608,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1426 * same serial line, the user will need some time to switch out of the BMC before 1608 * same serial line, the user will need some time to switch out of the BMC before
1427 * the dump begins. 1609 * the dump begins.
1428 */ 1610 */
1429 printk("Delaying for 5 seconds...\n"); 1611 mprintk("Delaying for 5 seconds...\n");
1430 udelay(5*1000000); 1612 udelay(5*1000000);
1431 ia64_wait_for_slaves(cpu, "INIT"); 1613 ia64_wait_for_slaves(cpu, "INIT");
1432 /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through 1614 /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
@@ -1439,7 +1621,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1439 if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0) 1621 if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
1440 == NOTIFY_STOP) 1622 == NOTIFY_STOP)
1441 ia64_mca_spin(__FUNCTION__); 1623 ia64_mca_spin(__FUNCTION__);
1442 printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); 1624 mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
1443 atomic_dec(&monarchs); 1625 atomic_dec(&monarchs);
1444 set_curr_task(cpu, previous_current); 1626 set_curr_task(cpu, previous_current);
1445 monarch_cpu = -1; 1627 monarch_cpu = -1;
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 96047491d1b9..c6b607c00dee 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -1025,18 +1025,13 @@ ia64_old_stack:
1025 1025
1026ia64_set_kernel_registers: 1026ia64_set_kernel_registers:
1027 add temp3=MCA_SP_OFFSET, r3 1027 add temp3=MCA_SP_OFFSET, r3
1028 add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3
1029 mov b0=r2 // save return address 1028 mov b0=r2 // save return address
1030 GET_IA64_MCA_DATA(temp1) 1029 GET_IA64_MCA_DATA(temp1)
1031 ;; 1030 ;;
1032 add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp
1033 add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack 1031 add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
1034 add r13=temp1, r3 // set current to start of MCA/INIT stack 1032 add r13=temp1, r3 // set current to start of MCA/INIT stack
1035 add r20=temp1, r3 // physical start of MCA/INIT stack 1033 add r20=temp1, r3 // physical start of MCA/INIT stack
1036 ;; 1034 ;;
1037 ld8 r1=[temp4] // OS GP from SAL OS state
1038 ;;
1039 DATA_PA_TO_VA(r1,temp1)
1040 DATA_PA_TO_VA(r12,temp2) 1035 DATA_PA_TO_VA(r12,temp2)
1041 DATA_PA_TO_VA(r13,temp3) 1036 DATA_PA_TO_VA(r13,temp3)
1042 ;; 1037 ;;
@@ -1067,6 +1062,10 @@ ia64_set_kernel_registers:
1067 mov cr.itir=r18 1062 mov cr.itir=r18
1068 mov cr.ifa=r13 1063 mov cr.ifa=r13
1069 mov r20=IA64_TR_CURRENT_STACK 1064 mov r20=IA64_TR_CURRENT_STACK
1065
1066 movl r17=FPSR_DEFAULT
1067 ;;
1068 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1070 ;; 1069 ;;
1071 itr.d dtr[r20]=r21 1070 itr.d dtr[r20]=r21
1072 ;; 1071 ;;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 8db6e0cedadc..a45009d2bc90 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -79,14 +79,30 @@ static int
79fatal_mca(const char *fmt, ...) 79fatal_mca(const char *fmt, ...)
80{ 80{
81 va_list args; 81 va_list args;
82 char buf[256];
82 83
83 va_start(args, fmt); 84 va_start(args, fmt);
84 vprintk(fmt, args); 85 vsnprintf(buf, sizeof(buf), fmt, args);
85 va_end(args); 86 va_end(args);
87 ia64_mca_printk(KERN_ALERT "MCA: %s\n", buf);
86 88
87 return MCA_NOT_RECOVERED; 89 return MCA_NOT_RECOVERED;
88} 90}
89 91
92static int
93mca_recovered(const char *fmt, ...)
94{
95 va_list args;
96 char buf[256];
97
98 va_start(args, fmt);
99 vsnprintf(buf, sizeof(buf), fmt, args);
100 va_end(args);
101 ia64_mca_printk(KERN_INFO "MCA: %s\n", buf);
102
103 return MCA_RECOVERED;
104}
105
90/** 106/**
91 * mca_page_isolate - isolate a poisoned page in order not to use it later 107 * mca_page_isolate - isolate a poisoned page in order not to use it later
92 * @paddr: poisoned memory location 108 * @paddr: poisoned memory location
@@ -140,6 +156,7 @@ mca_page_isolate(unsigned long paddr)
140void 156void
141mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) 157mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
142{ 158{
159 ia64_mlogbuf_dump();
143 printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, " 160 printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
144 "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n", 161 "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
145 raw_smp_processor_id(), current->pid, current->uid, 162 raw_smp_processor_id(), current->pid, current->uid,
@@ -440,7 +457,7 @@ recover_from_read_error(slidx_table_t *slidx,
440 457
441 /* Is target address valid? */ 458 /* Is target address valid? */
442 if (!pbci->tv) 459 if (!pbci->tv)
443 return fatal_mca(KERN_ALERT "MCA: target address not valid\n"); 460 return fatal_mca("target address not valid");
444 461
445 /* 462 /*
446 * cpu read or memory-mapped io read 463 * cpu read or memory-mapped io read
@@ -458,7 +475,7 @@ recover_from_read_error(slidx_table_t *slidx,
458 475
459 /* Is minstate valid? */ 476 /* Is minstate valid? */
460 if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate)) 477 if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
461 return fatal_mca(KERN_ALERT "MCA: minstate not valid\n"); 478 return fatal_mca("minstate not valid");
462 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr); 479 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
463 psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr); 480 psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
464 481
@@ -492,13 +509,14 @@ recover_from_read_error(slidx_table_t *slidx,
492 psr2->bn = 1; 509 psr2->bn = 1;
493 psr2->i = 0; 510 psr2->i = 0;
494 511
495 return MCA_RECOVERED; 512 return mca_recovered("user memory corruption. "
513 "kill affected process - recovered.");
496 } 514 }
497 515
498 } 516 }
499 517
500 return fatal_mca(KERN_ALERT "MCA: kernel context not recovered," 518 return fatal_mca("kernel context not recovered, iip 0x%lx\n",
501 " iip 0x%lx\n", pmsa->pmsa_iip); 519 pmsa->pmsa_iip);
502} 520}
503 521
504/** 522/**
@@ -584,13 +602,13 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
584 * The machine check is corrected. 602 * The machine check is corrected.
585 */ 603 */
586 if (psp->cm == 1) 604 if (psp->cm == 1)
587 return MCA_RECOVERED; 605 return mca_recovered("machine check is already corrected.");
588 606
589 /* 607 /*
590 * The error was not contained. Software must be reset. 608 * The error was not contained. Software must be reset.
591 */ 609 */
592 if (psp->us || psp->ci == 0) 610 if (psp->us || psp->ci == 0)
593 return fatal_mca(KERN_ALERT "MCA: error not contained\n"); 611 return fatal_mca("error not contained");
594 612
595 /* 613 /*
596 * The cache check and bus check bits have four possible states 614 * The cache check and bus check bits have four possible states
@@ -601,22 +619,22 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
601 * 1 1 Memory error, attempt recovery 619 * 1 1 Memory error, attempt recovery
602 */ 620 */
603 if (psp->bc == 0 || pbci == NULL) 621 if (psp->bc == 0 || pbci == NULL)
604 return fatal_mca(KERN_ALERT "MCA: No bus check\n"); 622 return fatal_mca("No bus check");
605 623
606 /* 624 /*
607 * Sorry, we cannot handle so many. 625 * Sorry, we cannot handle so many.
608 */ 626 */
609 if (peidx_bus_check_num(peidx) > 1) 627 if (peidx_bus_check_num(peidx) > 1)
610 return fatal_mca(KERN_ALERT "MCA: Too many bus checks\n"); 628 return fatal_mca("Too many bus checks");
611 /* 629 /*
612 * Well, here is only one bus error. 630 * Well, here is only one bus error.
613 */ 631 */
614 if (pbci->ib) 632 if (pbci->ib)
615 return fatal_mca(KERN_ALERT "MCA: Internal Bus error\n"); 633 return fatal_mca("Internal Bus error");
616 if (pbci->cc) 634 if (pbci->cc)
617 return fatal_mca(KERN_ALERT "MCA: Cache-cache error\n"); 635 return fatal_mca("Cache-cache error");
618 if (pbci->eb && pbci->bsi > 0) 636 if (pbci->eb && pbci->bsi > 0)
619 return fatal_mca(KERN_ALERT "MCA: External bus check fatal status\n"); 637 return fatal_mca("External bus check fatal status");
620 638
621 /* 639 /*
622 * This is a local MCA and estimated as recoverble external bus error. 640 * This is a local MCA and estimated as recoverble external bus error.
@@ -628,7 +646,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
628 /* 646 /*
629 * On account of strange SAL error record, we cannot recover. 647 * On account of strange SAL error record, we cannot recover.
630 */ 648 */
631 return fatal_mca(KERN_ALERT "MCA: Strange SAL record\n"); 649 return fatal_mca("Strange SAL record");
632} 650}
633 651
634/** 652/**
@@ -657,10 +675,10 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
657 675
658 /* Now, OS can recover when there is one processor error section */ 676 /* Now, OS can recover when there is one processor error section */
659 if (n_proc_err > 1) 677 if (n_proc_err > 1)
660 return fatal_mca(KERN_ALERT "MCA: Too Many Errors\n"); 678 return fatal_mca("Too Many Errors");
661 else if (n_proc_err == 0) 679 else if (n_proc_err == 0)
662 /* Weird SAL record ... We need not to recover */ 680 /* Weird SAL record ... We can't do anything */
663 return fatal_mca(KERN_ALERT "MCA: Weird SAL record\n"); 681 return fatal_mca("Weird SAL record");
664 682
665 /* Make index of processor error section */ 683 /* Make index of processor error section */
666 mca_make_peidx((sal_log_processor_info_t*) 684 mca_make_peidx((sal_log_processor_info_t*)
@@ -671,7 +689,7 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
671 689
672 /* Check whether MCA is global or not */ 690 /* Check whether MCA is global or not */
673 if (is_mca_global(&peidx, &pbci, sos)) 691 if (is_mca_global(&peidx, &pbci, sos))
674 return fatal_mca(KERN_ALERT "MCA: global MCA\n"); 692 return fatal_mca("global MCA");
675 693
676 /* Try to recover a processor error */ 694 /* Try to recover a processor error */
677 return recover_from_processor_error(platform_err, &slidx, &peidx, 695 return recover_from_processor_error(platform_err, &slidx, &peidx,
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 31a2e52bb16f..c85e943ba5fd 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -118,3 +118,7 @@ struct mca_table_entry {
118 118
119extern const struct mca_table_entry *search_mca_tables (unsigned long addr); 119extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
120extern int mca_recover_range(unsigned long); 120extern int mca_recover_range(unsigned long);
121extern void ia64_mca_printk(const char * fmt, ...)
122 __attribute__ ((format (printf, 1, 2)));
123extern void ia64_mlogbuf_dump(void);
124
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index 1cc360c83e7a..20340631179f 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -29,6 +29,36 @@ EXPORT_SYMBOL(cpu_to_node_map);
29 29
30cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; 30cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
31 31
32void __cpuinit map_cpu_to_node(int cpu, int nid)
33{
34 int oldnid;
35 if (nid < 0) { /* just initialize by zero */
36 cpu_to_node_map[cpu] = 0;
37 return;
38 }
39 /* sanity check first */
40 oldnid = cpu_to_node_map[cpu];
41 if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
42 return; /* nothing to do */
43 }
44 /* we don't have cpu-driven node hot add yet...
45 In usual case, node is created from SRAT at boot time. */
46 if (!node_online(nid))
47 nid = first_online_node;
48 cpu_to_node_map[cpu] = nid;
49 cpu_set(cpu, node_to_cpu_mask[nid]);
50 return;
51}
52
53void __cpuinit unmap_cpu_from_node(int cpu, int nid)
54{
55 WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
56 WARN_ON(cpu_to_node_map[cpu] != nid);
57 cpu_to_node_map[cpu] = 0;
58 cpu_clear(cpu, node_to_cpu_mask[nid]);
59}
60
61
32/** 62/**
33 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays 63 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
34 * 64 *
@@ -49,8 +79,6 @@ void __init build_cpu_to_node_map(void)
49 node = node_cpuid[i].nid; 79 node = node_cpuid[i].nid;
50 break; 80 break;
51 } 81 }
52 cpu_to_node_map[cpu] = (node >= 0) ? node : 0; 82 map_cpu_to_node(cpu, node);
53 if (node >= 0)
54 cpu_set(cpu, node_to_cpu_mask[node]);
55 } 83 }
56} 84}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 84a7e52f56f6..281004ff7b00 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -34,6 +34,7 @@
34#include <linux/file.h> 34#include <linux/file.h>
35#include <linux/poll.h> 35#include <linux/poll.h>
36#include <linux/vfs.h> 36#include <linux/vfs.h>
37#include <linux/smp.h>
37#include <linux/pagemap.h> 38#include <linux/pagemap.h>
38#include <linux/mount.h> 39#include <linux/mount.h>
39#include <linux/bitops.h> 40#include <linux/bitops.h>
@@ -62,6 +63,9 @@
62 63
63#define PFM_INVALID_ACTIVATION (~0UL) 64#define PFM_INVALID_ACTIVATION (~0UL)
64 65
66#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
67#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
68
65/* 69/*
66 * depth of message queue 70 * depth of message queue
67 */ 71 */
@@ -296,14 +300,17 @@ typedef struct pfm_context {
296 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */ 300 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
297 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */ 301 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
298 302
299 unsigned long ctx_pmcs[IA64_NUM_PMC_REGS]; /* saved copies of PMC values */ 303 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
300 304
301 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */ 305 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
302 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */ 306 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
303 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */ 307 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
304 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */ 308 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
305 309
306 pfm_counter_t ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */ 310 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
311
312 unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
313 unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
307 314
308 u64 ctx_saved_psr_up; /* only contains psr.up value */ 315 u64 ctx_saved_psr_up; /* only contains psr.up value */
309 316
@@ -867,7 +874,6 @@ static void
867pfm_mask_monitoring(struct task_struct *task) 874pfm_mask_monitoring(struct task_struct *task)
868{ 875{
869 pfm_context_t *ctx = PFM_GET_CTX(task); 876 pfm_context_t *ctx = PFM_GET_CTX(task);
870 struct thread_struct *th = &task->thread;
871 unsigned long mask, val, ovfl_mask; 877 unsigned long mask, val, ovfl_mask;
872 int i; 878 int i;
873 879
@@ -888,7 +894,7 @@ pfm_mask_monitoring(struct task_struct *task)
888 * So in both cases, the live register contains the owner's 894 * So in both cases, the live register contains the owner's
889 * state. We can ONLY touch the PMU registers and NOT the PSR. 895 * state. We can ONLY touch the PMU registers and NOT the PSR.
890 * 896 *
891 * As a consequence to this call, the thread->pmds[] array 897 * As a consequence to this call, the ctx->th_pmds[] array
892 * contains stale information which must be ignored 898 * contains stale information which must be ignored
893 * when context is reloaded AND monitoring is active (see 899 * when context is reloaded AND monitoring is active (see
894 * pfm_restart). 900 * pfm_restart).
@@ -923,9 +929,9 @@ pfm_mask_monitoring(struct task_struct *task)
923 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; 929 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
924 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { 930 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
925 if ((mask & 0x1) == 0UL) continue; 931 if ((mask & 0x1) == 0UL) continue;
926 ia64_set_pmc(i, th->pmcs[i] & ~0xfUL); 932 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
927 th->pmcs[i] &= ~0xfUL; 933 ctx->th_pmcs[i] &= ~0xfUL;
928 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i])); 934 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
929 } 935 }
930 /* 936 /*
931 * make all of this visible 937 * make all of this visible
@@ -942,7 +948,6 @@ static void
942pfm_restore_monitoring(struct task_struct *task) 948pfm_restore_monitoring(struct task_struct *task)
943{ 949{
944 pfm_context_t *ctx = PFM_GET_CTX(task); 950 pfm_context_t *ctx = PFM_GET_CTX(task);
945 struct thread_struct *th = &task->thread;
946 unsigned long mask, ovfl_mask; 951 unsigned long mask, ovfl_mask;
947 unsigned long psr, val; 952 unsigned long psr, val;
948 int i, is_system; 953 int i, is_system;
@@ -1008,9 +1013,9 @@ pfm_restore_monitoring(struct task_struct *task)
1008 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; 1013 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1009 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { 1014 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1010 if ((mask & 0x1) == 0UL) continue; 1015 if ((mask & 0x1) == 0UL) continue;
1011 th->pmcs[i] = ctx->ctx_pmcs[i]; 1016 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1012 ia64_set_pmc(i, th->pmcs[i]); 1017 ia64_set_pmc(i, ctx->th_pmcs[i]);
1013 DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i])); 1018 DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
1014 } 1019 }
1015 ia64_srlz_d(); 1020 ia64_srlz_d();
1016 1021
@@ -1069,7 +1074,6 @@ pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1069static inline void 1074static inline void
1070pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) 1075pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1071{ 1076{
1072 struct thread_struct *thread = &task->thread;
1073 unsigned long ovfl_val = pmu_conf->ovfl_val; 1077 unsigned long ovfl_val = pmu_conf->ovfl_val;
1074 unsigned long mask = ctx->ctx_all_pmds[0]; 1078 unsigned long mask = ctx->ctx_all_pmds[0];
1075 unsigned long val; 1079 unsigned long val;
@@ -1091,11 +1095,11 @@ pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1091 ctx->ctx_pmds[i].val = val & ~ovfl_val; 1095 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1092 val &= ovfl_val; 1096 val &= ovfl_val;
1093 } 1097 }
1094 thread->pmds[i] = val; 1098 ctx->th_pmds[i] = val;
1095 1099
1096 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n", 1100 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1097 i, 1101 i,
1098 thread->pmds[i], 1102 ctx->th_pmds[i],
1099 ctx->ctx_pmds[i].val)); 1103 ctx->ctx_pmds[i].val));
1100 } 1104 }
1101} 1105}
@@ -1106,7 +1110,6 @@ pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1106static inline void 1110static inline void
1107pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) 1111pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1108{ 1112{
1109 struct thread_struct *thread = &task->thread;
1110 unsigned long mask = ctx->ctx_all_pmcs[0]; 1113 unsigned long mask = ctx->ctx_all_pmcs[0];
1111 int i; 1114 int i;
1112 1115
@@ -1114,8 +1117,8 @@ pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1114 1117
1115 for (i=0; mask; i++, mask>>=1) { 1118 for (i=0; mask; i++, mask>>=1) {
1116 /* masking 0 with ovfl_val yields 0 */ 1119 /* masking 0 with ovfl_val yields 0 */
1117 thread->pmcs[i] = ctx->ctx_pmcs[i]; 1120 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1118 DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i])); 1121 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1119 } 1122 }
1120} 1123}
1121 1124
@@ -2859,7 +2862,6 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2859static int 2862static int
2860pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) 2863pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2861{ 2864{
2862 struct thread_struct *thread = NULL;
2863 struct task_struct *task; 2865 struct task_struct *task;
2864 pfarg_reg_t *req = (pfarg_reg_t *)arg; 2866 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2865 unsigned long value, pmc_pm; 2867 unsigned long value, pmc_pm;
@@ -2880,7 +2882,6 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2880 if (state == PFM_CTX_ZOMBIE) return -EINVAL; 2882 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2881 2883
2882 if (is_loaded) { 2884 if (is_loaded) {
2883 thread = &task->thread;
2884 /* 2885 /*
2885 * In system wide and when the context is loaded, access can only happen 2886 * In system wide and when the context is loaded, access can only happen
2886 * when the caller is running on the CPU being monitored by the session. 2887 * when the caller is running on the CPU being monitored by the session.
@@ -3035,7 +3036,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3035 * 3036 *
3036 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs(). 3037 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3037 * 3038 *
3038 * The value in thread->pmcs[] may be modified on overflow, i.e., when 3039 * The value in th_pmcs[] may be modified on overflow, i.e., when
3039 * monitoring needs to be stopped. 3040 * monitoring needs to be stopped.
3040 */ 3041 */
3041 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum); 3042 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
@@ -3049,7 +3050,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3049 /* 3050 /*
3050 * write thread state 3051 * write thread state
3051 */ 3052 */
3052 if (is_system == 0) thread->pmcs[cnum] = value; 3053 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3053 3054
3054 /* 3055 /*
3055 * write hardware register if we can 3056 * write hardware register if we can
@@ -3101,7 +3102,6 @@ error:
3101static int 3102static int
3102pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) 3103pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3103{ 3104{
3104 struct thread_struct *thread = NULL;
3105 struct task_struct *task; 3105 struct task_struct *task;
3106 pfarg_reg_t *req = (pfarg_reg_t *)arg; 3106 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3107 unsigned long value, hw_value, ovfl_mask; 3107 unsigned long value, hw_value, ovfl_mask;
@@ -3125,7 +3125,6 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3125 * the owner of the local PMU. 3125 * the owner of the local PMU.
3126 */ 3126 */
3127 if (likely(is_loaded)) { 3127 if (likely(is_loaded)) {
3128 thread = &task->thread;
3129 /* 3128 /*
3130 * In system wide and when the context is loaded, access can only happen 3129 * In system wide and when the context is loaded, access can only happen
3131 * when the caller is running on the CPU being monitored by the session. 3130 * when the caller is running on the CPU being monitored by the session.
@@ -3233,7 +3232,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3233 /* 3232 /*
3234 * write thread state 3233 * write thread state
3235 */ 3234 */
3236 if (is_system == 0) thread->pmds[cnum] = hw_value; 3235 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3237 3236
3238 /* 3237 /*
3239 * write hardware register if we can 3238 * write hardware register if we can
@@ -3299,7 +3298,6 @@ abort_mission:
3299static int 3298static int
3300pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) 3299pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3301{ 3300{
3302 struct thread_struct *thread = NULL;
3303 struct task_struct *task; 3301 struct task_struct *task;
3304 unsigned long val = 0UL, lval, ovfl_mask, sval; 3302 unsigned long val = 0UL, lval, ovfl_mask, sval;
3305 pfarg_reg_t *req = (pfarg_reg_t *)arg; 3303 pfarg_reg_t *req = (pfarg_reg_t *)arg;
@@ -3323,7 +3321,6 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3323 if (state == PFM_CTX_ZOMBIE) return -EINVAL; 3321 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3324 3322
3325 if (likely(is_loaded)) { 3323 if (likely(is_loaded)) {
3326 thread = &task->thread;
3327 /* 3324 /*
3328 * In system wide and when the context is loaded, access can only happen 3325 * In system wide and when the context is loaded, access can only happen
3329 * when the caller is running on the CPU being monitored by the session. 3326 * when the caller is running on the CPU being monitored by the session.
@@ -3385,7 +3382,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3385 * if context is zombie, then task does not exist anymore. 3382 * if context is zombie, then task does not exist anymore.
3386 * In this case, we use the full value saved in the context (pfm_flush_regs()). 3383 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3387 */ 3384 */
3388 val = is_loaded ? thread->pmds[cnum] : 0UL; 3385 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3389 } 3386 }
3390 rd_func = pmu_conf->pmd_desc[cnum].read_check; 3387 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3391 3388
@@ -4354,8 +4351,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4354 pfm_copy_pmds(task, ctx); 4351 pfm_copy_pmds(task, ctx);
4355 pfm_copy_pmcs(task, ctx); 4352 pfm_copy_pmcs(task, ctx);
4356 4353
4357 pmcs_source = thread->pmcs; 4354 pmcs_source = ctx->th_pmcs;
4358 pmds_source = thread->pmds; 4355 pmds_source = ctx->th_pmds;
4359 4356
4360 /* 4357 /*
4361 * always the case for system-wide 4358 * always the case for system-wide
@@ -5864,14 +5861,12 @@ void
5864pfm_save_regs(struct task_struct *task) 5861pfm_save_regs(struct task_struct *task)
5865{ 5862{
5866 pfm_context_t *ctx; 5863 pfm_context_t *ctx;
5867 struct thread_struct *t;
5868 unsigned long flags; 5864 unsigned long flags;
5869 u64 psr; 5865 u64 psr;
5870 5866
5871 5867
5872 ctx = PFM_GET_CTX(task); 5868 ctx = PFM_GET_CTX(task);
5873 if (ctx == NULL) return; 5869 if (ctx == NULL) return;
5874 t = &task->thread;
5875 5870
5876 /* 5871 /*
5877 * we always come here with interrupts ALREADY disabled by 5872 * we always come here with interrupts ALREADY disabled by
@@ -5929,19 +5924,19 @@ pfm_save_regs(struct task_struct *task)
5929 * guarantee we will be schedule at that same 5924 * guarantee we will be schedule at that same
5930 * CPU again. 5925 * CPU again.
5931 */ 5926 */
5932 pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]); 5927 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5933 5928
5934 /* 5929 /*
5935 * save pmc0 ia64_srlz_d() done in pfm_save_pmds() 5930 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5936 * we will need it on the restore path to check 5931 * we will need it on the restore path to check
5937 * for pending overflow. 5932 * for pending overflow.
5938 */ 5933 */
5939 t->pmcs[0] = ia64_get_pmc(0); 5934 ctx->th_pmcs[0] = ia64_get_pmc(0);
5940 5935
5941 /* 5936 /*
5942 * unfreeze PMU if had pending overflows 5937 * unfreeze PMU if had pending overflows
5943 */ 5938 */
5944 if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); 5939 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5945 5940
5946 /* 5941 /*
5947 * finally, allow context access. 5942 * finally, allow context access.
@@ -5986,7 +5981,6 @@ static void
5986pfm_lazy_save_regs (struct task_struct *task) 5981pfm_lazy_save_regs (struct task_struct *task)
5987{ 5982{
5988 pfm_context_t *ctx; 5983 pfm_context_t *ctx;
5989 struct thread_struct *t;
5990 unsigned long flags; 5984 unsigned long flags;
5991 5985
5992 { u64 psr = pfm_get_psr(); 5986 { u64 psr = pfm_get_psr();
@@ -5994,7 +5988,6 @@ pfm_lazy_save_regs (struct task_struct *task)
5994 } 5988 }
5995 5989
5996 ctx = PFM_GET_CTX(task); 5990 ctx = PFM_GET_CTX(task);
5997 t = &task->thread;
5998 5991
5999 /* 5992 /*
6000 * we need to mask PMU overflow here to 5993 * we need to mask PMU overflow here to
@@ -6019,19 +6012,19 @@ pfm_lazy_save_regs (struct task_struct *task)
6019 /* 6012 /*
6020 * save all the pmds we use 6013 * save all the pmds we use
6021 */ 6014 */
6022 pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]); 6015 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6023 6016
6024 /* 6017 /*
6025 * save pmc0 ia64_srlz_d() done in pfm_save_pmds() 6018 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
6026 * it is needed to check for pended overflow 6019 * it is needed to check for pended overflow
6027 * on the restore path 6020 * on the restore path
6028 */ 6021 */
6029 t->pmcs[0] = ia64_get_pmc(0); 6022 ctx->th_pmcs[0] = ia64_get_pmc(0);
6030 6023
6031 /* 6024 /*
6032 * unfreeze PMU if had pending overflows 6025 * unfreeze PMU if had pending overflows
6033 */ 6026 */
6034 if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); 6027 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6035 6028
6036 /* 6029 /*
6037 * now get can unmask PMU interrupts, they will 6030 * now get can unmask PMU interrupts, they will
@@ -6050,7 +6043,6 @@ void
6050pfm_load_regs (struct task_struct *task) 6043pfm_load_regs (struct task_struct *task)
6051{ 6044{
6052 pfm_context_t *ctx; 6045 pfm_context_t *ctx;
6053 struct thread_struct *t;
6054 unsigned long pmc_mask = 0UL, pmd_mask = 0UL; 6046 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6055 unsigned long flags; 6047 unsigned long flags;
6056 u64 psr, psr_up; 6048 u64 psr, psr_up;
@@ -6061,11 +6053,10 @@ pfm_load_regs (struct task_struct *task)
6061 6053
6062 BUG_ON(GET_PMU_OWNER()); 6054 BUG_ON(GET_PMU_OWNER());
6063 6055
6064 t = &task->thread;
6065 /* 6056 /*
6066 * possible on unload 6057 * possible on unload
6067 */ 6058 */
6068 if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return; 6059 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6069 6060
6070 /* 6061 /*
6071 * we always come here with interrupts ALREADY disabled by 6062 * we always come here with interrupts ALREADY disabled by
@@ -6147,21 +6138,21 @@ pfm_load_regs (struct task_struct *task)
6147 * 6138 *
6148 * XXX: optimize here 6139 * XXX: optimize here
6149 */ 6140 */
6150 if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask); 6141 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6151 if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask); 6142 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6152 6143
6153 /* 6144 /*
6154 * check for pending overflow at the time the state 6145 * check for pending overflow at the time the state
6155 * was saved. 6146 * was saved.
6156 */ 6147 */
6157 if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) { 6148 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6158 /* 6149 /*
6159 * reload pmc0 with the overflow information 6150 * reload pmc0 with the overflow information
6160 * On McKinley PMU, this will trigger a PMU interrupt 6151 * On McKinley PMU, this will trigger a PMU interrupt
6161 */ 6152 */
6162 ia64_set_pmc(0, t->pmcs[0]); 6153 ia64_set_pmc(0, ctx->th_pmcs[0]);
6163 ia64_srlz_d(); 6154 ia64_srlz_d();
6164 t->pmcs[0] = 0UL; 6155 ctx->th_pmcs[0] = 0UL;
6165 6156
6166 /* 6157 /*
6167 * will replay the PMU interrupt 6158 * will replay the PMU interrupt
@@ -6214,7 +6205,6 @@ pfm_load_regs (struct task_struct *task)
6214void 6205void
6215pfm_load_regs (struct task_struct *task) 6206pfm_load_regs (struct task_struct *task)
6216{ 6207{
6217 struct thread_struct *t;
6218 pfm_context_t *ctx; 6208 pfm_context_t *ctx;
6219 struct task_struct *owner; 6209 struct task_struct *owner;
6220 unsigned long pmd_mask, pmc_mask; 6210 unsigned long pmd_mask, pmc_mask;
@@ -6223,7 +6213,6 @@ pfm_load_regs (struct task_struct *task)
6223 6213
6224 owner = GET_PMU_OWNER(); 6214 owner = GET_PMU_OWNER();
6225 ctx = PFM_GET_CTX(task); 6215 ctx = PFM_GET_CTX(task);
6226 t = &task->thread;
6227 psr = pfm_get_psr(); 6216 psr = pfm_get_psr();
6228 6217
6229 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); 6218 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
@@ -6286,22 +6275,22 @@ pfm_load_regs (struct task_struct *task)
6286 */ 6275 */
6287 pmc_mask = ctx->ctx_all_pmcs[0]; 6276 pmc_mask = ctx->ctx_all_pmcs[0];
6288 6277
6289 pfm_restore_pmds(t->pmds, pmd_mask); 6278 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6290 pfm_restore_pmcs(t->pmcs, pmc_mask); 6279 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6291 6280
6292 /* 6281 /*
6293 * check for pending overflow at the time the state 6282 * check for pending overflow at the time the state
6294 * was saved. 6283 * was saved.
6295 */ 6284 */
6296 if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) { 6285 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6297 /* 6286 /*
6298 * reload pmc0 with the overflow information 6287 * reload pmc0 with the overflow information
6299 * On McKinley PMU, this will trigger a PMU interrupt 6288 * On McKinley PMU, this will trigger a PMU interrupt
6300 */ 6289 */
6301 ia64_set_pmc(0, t->pmcs[0]); 6290 ia64_set_pmc(0, ctx->th_pmcs[0]);
6302 ia64_srlz_d(); 6291 ia64_srlz_d();
6303 6292
6304 t->pmcs[0] = 0UL; 6293 ctx->th_pmcs[0] = 0UL;
6305 6294
6306 /* 6295 /*
6307 * will replay the PMU interrupt 6296 * will replay the PMU interrupt
@@ -6376,11 +6365,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6376 */ 6365 */
6377 pfm_unfreeze_pmu(); 6366 pfm_unfreeze_pmu();
6378 } else { 6367 } else {
6379 pmc0 = task->thread.pmcs[0]; 6368 pmc0 = ctx->th_pmcs[0];
6380 /* 6369 /*
6381 * clear whatever overflow status bits there were 6370 * clear whatever overflow status bits there were
6382 */ 6371 */
6383 task->thread.pmcs[0] = 0; 6372 ctx->th_pmcs[0] = 0;
6384 } 6373 }
6385 ovfl_val = pmu_conf->ovfl_val; 6374 ovfl_val = pmu_conf->ovfl_val;
6386 /* 6375 /*
@@ -6401,7 +6390,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6401 /* 6390 /*
6402 * can access PMU always true in system wide mode 6391 * can access PMU always true in system wide mode
6403 */ 6392 */
6404 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i]; 6393 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6405 6394
6406 if (PMD_IS_COUNTING(i)) { 6395 if (PMD_IS_COUNTING(i)) {
6407 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", 6396 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
@@ -6433,7 +6422,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6433 6422
6434 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val)); 6423 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
6435 6424
6436 if (is_self) task->thread.pmds[i] = pmd_val; 6425 if (is_self) ctx->th_pmds[i] = pmd_val;
6437 6426
6438 ctx->ctx_pmds[i].val = val; 6427 ctx->ctx_pmds[i].val = val;
6439 } 6428 }
@@ -6677,7 +6666,7 @@ pfm_init(void)
6677 ffz(pmu_conf->ovfl_val)); 6666 ffz(pmu_conf->ovfl_val));
6678 6667
6679 /* sanity check */ 6668 /* sanity check */
6680 if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) { 6669 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6681 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); 6670 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6682 pmu_conf = NULL; 6671 pmu_conf = NULL;
6683 return -1; 6672 return -1;
@@ -6752,7 +6741,6 @@ void
6752dump_pmu_state(const char *from) 6741dump_pmu_state(const char *from)
6753{ 6742{
6754 struct task_struct *task; 6743 struct task_struct *task;
6755 struct thread_struct *t;
6756 struct pt_regs *regs; 6744 struct pt_regs *regs;
6757 pfm_context_t *ctx; 6745 pfm_context_t *ctx;
6758 unsigned long psr, dcr, info, flags; 6746 unsigned long psr, dcr, info, flags;
@@ -6797,16 +6785,14 @@ dump_pmu_state(const char *from)
6797 ia64_psr(regs)->up = 0; 6785 ia64_psr(regs)->up = 0;
6798 ia64_psr(regs)->pp = 0; 6786 ia64_psr(regs)->pp = 0;
6799 6787
6800 t = &current->thread;
6801
6802 for (i=1; PMC_IS_LAST(i) == 0; i++) { 6788 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6803 if (PMC_IS_IMPL(i) == 0) continue; 6789 if (PMC_IS_IMPL(i) == 0) continue;
6804 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]); 6790 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6805 } 6791 }
6806 6792
6807 for (i=1; PMD_IS_LAST(i) == 0; i++) { 6793 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6808 if (PMD_IS_IMPL(i) == 0) continue; 6794 if (PMD_IS_IMPL(i) == 0) continue;
6809 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]); 6795 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6810 } 6796 }
6811 6797
6812 if (ctx) { 6798 if (ctx) {
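The perfmon.c hunks above move the saved performance-register images out of struct thread_struct (thread->pmcs[], thread->pmds[]) and into the perfmon context itself (ctx->th_pmcs[], ctx->th_pmds[]), so every save, restore, copy and flush path now indexes the context rather than the task's thread state, and the sanity check in pfm_init() is bounded by PFM_NUM_PMC_REGS/PFM_NUM_PMD_REGS instead of the IA64_* limits. Below is a minimal user-space sketch of that ownership change; the sizes, types and helper names are simplified assumptions, not the kernel's definitions.

/*
 * Standalone sketch (user-space C, not kernel code): the saved PMC/PMD
 * register images live in the perfmon context rather than per-thread state.
 * Array sizes and names here are placeholders.
 */
#include <stdio.h>

#define PFM_NUM_PMC_REGS 64    /* assumed placeholder, not the real limit */
#define PFM_NUM_PMD_REGS 64

struct pfm_context_sketch {
    unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* software copies (pfm_write_pmcs) */
    unsigned long th_pmcs[PFM_NUM_PMC_REGS];  /* hardware image, was thread->pmcs[] */
    unsigned long th_pmds[PFM_NUM_PMD_REGS];  /* hardware image, was thread->pmds[] */
};

/* Mirrors the shape of pfm_copy_pmcs() after the change: walk the in-use
 * mask and refresh the context-owned hardware image from the software
 * copies; copying an unused slot just writes 0, which is harmless. */
static void copy_pmcs_sketch(struct pfm_context_sketch *ctx, unsigned long mask)
{
    int i;

    for (i = 0; mask; i++, mask >>= 1)
        ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
}

int main(void)
{
    struct pfm_context_sketch ctx = { .ctx_pmcs = { 0x1, 0x2, 0x4 } };

    copy_pmcs_sketch(&ctx, 0x7UL);    /* pretend PMC0..PMC2 are in use */
    printf("th_pmcs[2] = 0x%lx\n", ctx.th_pmcs[2]);
    return 0;
}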
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 9065f0f01ba3..e63b8ca5344a 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -266,6 +266,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
266/* Check for outstanding MCA/INIT records every minute (arbitrary) */ 266/* Check for outstanding MCA/INIT records every minute (arbitrary) */
267#define SALINFO_TIMER_DELAY (60*HZ) 267#define SALINFO_TIMER_DELAY (60*HZ)
268static struct timer_list salinfo_timer; 268static struct timer_list salinfo_timer;
269extern void ia64_mlogbuf_dump(void);
269 270
270static void 271static void
271salinfo_timeout_check(struct salinfo_data *data) 272salinfo_timeout_check(struct salinfo_data *data)
@@ -283,6 +284,7 @@ salinfo_timeout_check(struct salinfo_data *data)
283static void 284static void
284salinfo_timeout (unsigned long arg) 285salinfo_timeout (unsigned long arg)
285{ 286{
287 ia64_mlogbuf_dump();
286 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); 288 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
287 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT); 289 salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT);
288 salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; 290 salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
@@ -332,6 +334,8 @@ retry:
332 if (cpu == -1) 334 if (cpu == -1)
333 goto retry; 335 goto retry;
334 336
337 ia64_mlogbuf_dump();
338
335 /* for next read, start checking at next CPU */ 339 /* for next read, start checking at next CPU */
336 data->cpu_check = cpu; 340 data->cpu_check = cpu;
337 if (++data->cpu_check == NR_CPUS) 341 if (++data->cpu_check == NR_CPUS)
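The salinfo.c hunks add calls to ia64_mlogbuf_dump() in both the periodic timer and the reader retry path, so buffered MCA messages are flushed before outstanding SAL records are checked. A rough user-space analogue of that flush-then-check ordering follows; every name and type in it is a made-up stand-in.

/* User-space analogue of the ordering added in salinfo.c: flush a deferred
 * message buffer first, then check each record type.  None of these names
 * are the kernel's. */
#include <stdio.h>
#include <string.h>

static char mlogbuf[256];              /* pretend deferred MCA message buffer */

static void mlogbuf_dump(void)         /* stand-in for ia64_mlogbuf_dump() */
{
    if (mlogbuf[0]) {
        printf("mca log: %s\n", mlogbuf);
        mlogbuf[0] = '\0';
    }
}

static void timeout_check(const char *type)
{
    printf("checking outstanding %s records\n", type);
}

static void salinfo_timeout_sketch(void)
{
    mlogbuf_dump();                    /* flush buffered messages first ... */
    timeout_check("MCA");              /* ... then look at each record type */
    timeout_check("INIT");
}

int main(void)
{
    strcpy(mlogbuf, "example buffered message");
    salinfo_timeout_sketch();
    return 0;
}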
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 7ad0d9cc6db6..84f93c0f2c66 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -509,7 +509,7 @@ show_cpuinfo (struct seq_file *m, void *v)
509 { 1UL << 1, "spontaneous deferral"}, 509 { 1UL << 1, "spontaneous deferral"},
510 { 1UL << 2, "16-byte atomic ops" } 510 { 1UL << 2, "16-byte atomic ops" }
511 }; 511 };
512 char family[32], features[128], *cp, sep; 512 char features[128], *cp, sep;
513 struct cpuinfo_ia64 *c = v; 513 struct cpuinfo_ia64 *c = v;
514 unsigned long mask; 514 unsigned long mask;
515 unsigned long proc_freq; 515 unsigned long proc_freq;
@@ -517,12 +517,6 @@ show_cpuinfo (struct seq_file *m, void *v)
517 517
518 mask = c->features; 518 mask = c->features;
519 519
520 switch (c->family) {
521 case 0x07: memcpy(family, "Itanium", 8); break;
522 case 0x1f: memcpy(family, "Itanium 2", 10); break;
523 default: sprintf(family, "%u", c->family); break;
524 }
525
526 /* build the feature string: */ 520 /* build the feature string: */
527 memcpy(features, " standard", 10); 521 memcpy(features, " standard", 10);
528 cp = features; 522 cp = features;
@@ -553,8 +547,9 @@ show_cpuinfo (struct seq_file *m, void *v)
553 "processor : %d\n" 547 "processor : %d\n"
554 "vendor : %s\n" 548 "vendor : %s\n"
555 "arch : IA-64\n" 549 "arch : IA-64\n"
556 "family : %s\n" 550 "family : %u\n"
557 "model : %u\n" 551 "model : %u\n"
552 "model name : %s\n"
558 "revision : %u\n" 553 "revision : %u\n"
559 "archrev : %u\n" 554 "archrev : %u\n"
560 "features :%s\n" /* don't change this---it _is_ right! */ 555 "features :%s\n" /* don't change this---it _is_ right! */
@@ -563,7 +558,8 @@ show_cpuinfo (struct seq_file *m, void *v)
563 "cpu MHz : %lu.%06lu\n" 558 "cpu MHz : %lu.%06lu\n"
564 "itc MHz : %lu.%06lu\n" 559 "itc MHz : %lu.%06lu\n"
565 "BogoMIPS : %lu.%02lu\n", 560 "BogoMIPS : %lu.%02lu\n",
566 cpunum, c->vendor, family, c->model, c->revision, c->archrev, 561 cpunum, c->vendor, c->family, c->model,
562 c->model_name, c->revision, c->archrev,
567 features, c->ppn, c->number, 563 features, c->ppn, c->number,
568 proc_freq / 1000, proc_freq % 1000, 564 proc_freq / 1000, proc_freq % 1000,
569 c->itc_freq / 1000000, c->itc_freq % 1000000, 565 c->itc_freq / 1000000, c->itc_freq % 1000000,
@@ -611,6 +607,31 @@ struct seq_operations cpuinfo_op = {
611 .show = show_cpuinfo 607 .show = show_cpuinfo
612}; 608};
613 609
610static char brandname[128];
611
612static char * __cpuinit
613get_model_name(__u8 family, __u8 model)
614{
615 char brand[128];
616
617 if (ia64_pal_get_brand_info(brand)) {
618 if (family == 0x7)
619 memcpy(brand, "Merced", 7);
620 else if (family == 0x1f) switch (model) {
621 case 0: memcpy(brand, "McKinley", 9); break;
622 case 1: memcpy(brand, "Madison", 8); break;
623 case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
624 } else
625 memcpy(brand, "Unknown", 8);
626 }
627 if (brandname[0] == '\0')
628 return strcpy(brandname, brand);
629 else if (strcmp(brandname, brand) == 0)
630 return brandname;
631 else
632 return kstrdup(brand, GFP_KERNEL);
633}
634
614static void __cpuinit 635static void __cpuinit
615identify_cpu (struct cpuinfo_ia64 *c) 636identify_cpu (struct cpuinfo_ia64 *c)
616{ 637{
@@ -640,7 +661,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
640 pal_status_t status; 661 pal_status_t status;
641 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ 662 unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
642 int i; 663 int i;
643
644 for (i = 0; i < 5; ++i) 664 for (i = 0; i < 5; ++i)
645 cpuid.bits[i] = ia64_get_cpuid(i); 665 cpuid.bits[i] = ia64_get_cpuid(i);
646 666
@@ -663,6 +683,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
663 c->family = cpuid.field.family; 683 c->family = cpuid.field.family;
664 c->archrev = cpuid.field.archrev; 684 c->archrev = cpuid.field.archrev;
665 c->features = cpuid.field.features; 685 c->features = cpuid.field.features;
686 c->model_name = get_model_name(c->family, c->model);
666 687
667 status = ia64_pal_vm_summary(&vm1, &vm2); 688 status = ia64_pal_vm_summary(&vm1, &vm2);
668 if (status == PAL_STATUS_SUCCESS) { 689 if (status == PAL_STATUS_SUCCESS) {
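The setup.c hunks drop the hand-built family string from /proc/cpuinfo in favour of the numeric family plus a new "model name" line filled in by get_model_name(): it asks firmware for the brand string via ia64_pal_get_brand_info(), falls back to hard-coded names for known family/model pairs, and caches the first result in a static buffer, duplicating it only if a later CPU reports a different brand. A user-space sketch of that fallback-and-cache pattern, with a stub in place of the PAL call and assumed buffer sizes:

/* User-space sketch of the get_model_name() pattern added in setup.c.
 * pal_get_brand_stub() stands in for ia64_pal_get_brand_info(); the
 * fallback table below is abbreviated. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char brandname[128];

static int pal_get_brand_stub(char *brand)
{
    (void)brand;
    return -1;                         /* pretend the PAL call is unimplemented */
}

static char *get_model_name_sketch(unsigned family, unsigned model)
{
    char brand[128];

    if (pal_get_brand_stub(brand)) {
        if (family == 0x7)
            strcpy(brand, "Merced");
        else if (family == 0x1f && model == 0)
            strcpy(brand, "McKinley");
        else if (family == 0x1f && model == 1)
            strcpy(brand, "Madison");
        else
            strcpy(brand, "Unknown");
    }
    if (brandname[0] == '\0')          /* first CPU: cache the string */
        return strcpy(brandname, brand);
    if (strcmp(brandname, brand) == 0) /* same brand: reuse the cached copy */
        return brandname;
    return strdup(brand);              /* mixed system: give this CPU its own copy */
}

int main(void)
{
    printf("cpu0 model name : %s\n", get_model_name_sketch(0x1f, 1));
    printf("cpu1 model name : %s\n", get_model_name_sketch(0x1f, 1));
    return 0;
}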
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6203ed4ec8cf..f7d7f5668144 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -879,3 +879,27 @@ identify_siblings(struct cpuinfo_ia64 *c)
879 c->core_id = info.log1_cid; 879 c->core_id = info.log1_cid;
880 c->thread_id = info.log1_tid; 880 c->thread_id = info.log1_tid;
881} 881}
882
883/*
884 * returns non zero, if multi-threading is enabled
885 * on at least one physical package. Due to hotplug cpu
886 * and (maxcpus=), all threads may not necessarily be enabled
887 * even though the processor supports multi-threading.
888 */
889int is_multithreading_enabled(void)
890{
891 int i, j;
892
893 for_each_present_cpu(i) {
894 for_each_present_cpu(j) {
895 if (j == i)
896 continue;
897 if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
898 if (cpu_data(j)->core_id == cpu_data(i)->core_id)
899 return 1;
900 }
901 }
902 }
903 return 0;
904}
905EXPORT_SYMBOL_GPL(is_multithreading_enabled);
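The smpboot.c hunk adds is_multithreading_enabled(), which reports whether any two distinct present CPUs share both a socket id and a core id, i.e. whether at least one package actually has more than one thread enabled despite hotplug or maxcpus= restrictions. A standalone sketch of the same pairwise check over a plain array; the cpu_ids struct and the sample data are invented for illustration.

/* User-space sketch of the pairwise check in is_multithreading_enabled():
 * multi-threading counts as enabled if two distinct present CPUs share both
 * a socket id and a core id.  The data below is made up. */
#include <stdio.h>

struct cpu_ids {
    int socket_id;
    int core_id;
};

static int is_multithreading_enabled_sketch(const struct cpu_ids *cpu, int ncpus)
{
    int i, j;

    for (i = 0; i < ncpus; i++) {
        for (j = 0; j < ncpus; j++) {
            if (j == i)
                continue;
            if (cpu[j].socket_id == cpu[i].socket_id &&
                cpu[j].core_id == cpu[i].core_id)
                return 1;              /* two threads on the same core */
        }
    }
    return 0;
}

int main(void)
{
    struct cpu_ids cpus[] = {
        { .socket_id = 0, .core_id = 0 },
        { .socket_id = 0, .core_id = 0 },  /* sibling thread of cpu0 */
        { .socket_id = 1, .core_id = 0 },
    };

    printf("multithreading enabled: %d\n",
           is_multithreading_enabled_sketch(cpus, 3));
    return 0;
}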
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index f648c610b10c..5629b45e89c6 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,6 +36,7 @@ int arch_register_cpu(int num)
36 */ 36 */
37 if (!can_cpei_retarget() && is_cpu_cpei_target(num)) 37 if (!can_cpei_retarget() && is_cpu_cpei_target(num))
38 sysfs_cpus[num].cpu.no_control = 1; 38 sysfs_cpus[num].cpu.no_control = 1;
39 map_cpu_to_node(num, node_cpuid[num].nid);
39#endif 40#endif
40 41
41 return register_cpu(&sysfs_cpus[num].cpu, num); 42 return register_cpu(&sysfs_cpus[num].cpu, num);
@@ -45,7 +46,8 @@ int arch_register_cpu(int num)
45 46
46void arch_unregister_cpu(int num) 47void arch_unregister_cpu(int num)
47{ 48{
48 return unregister_cpu(&sysfs_cpus[num].cpu); 49 unregister_cpu(&sysfs_cpus[num].cpu);
50 unmap_cpu_from_node(num, cpu_to_node(num));
49} 51}
50EXPORT_SYMBOL(arch_register_cpu); 52EXPORT_SYMBOL(arch_register_cpu);
51EXPORT_SYMBOL(arch_unregister_cpu); 53EXPORT_SYMBOL(arch_unregister_cpu);
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 4c73a6763669..c58e933694d5 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
98 98
99 /* attempt to allocate a granule's worth of cached memory pages */ 99 /* attempt to allocate a granule's worth of cached memory pages */
100 100
101 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 101 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
102 IA64_GRANULE_SHIFT-PAGE_SHIFT); 102 IA64_GRANULE_SHIFT-PAGE_SHIFT);
103 if (!page) { 103 if (!page) {
104 mutex_unlock(&uc_pool->add_chunk_mutex); 104 mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5b0d5f64a9b1..b3b2e389d6b2 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -184,7 +184,9 @@ SECTIONS
184 *(.data.gate) 184 *(.data.gate)
185 __stop_gate_section = .; 185 __stop_gate_section = .;
186 } 186 }
187 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */ 187 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
188 * kernel data
189 */
188 190
189 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) 191 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
190 { *(.data.read_mostly) } 192 { *(.data.read_mostly) }
@@ -202,7 +204,9 @@ SECTIONS
202 *(.data.percpu) 204 *(.data.percpu)
203 __per_cpu_end = .; 205 __per_cpu_end = .;
204 } 206 }
205 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */ 207 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
208 * into percpu page size
209 */
206 210
207 data : { } :data 211 data : { } :data
208 .data : AT(ADDR(.data) - LOAD_OFFSET) 212 .data : AT(ADDR(.data) - LOAD_OFFSET)