path: root/arch/ia64/kernel
author	Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
committer	Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
commit	c4366889dda8110247be59ca41fddb82951a8c26 (patch)
tree	705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /arch/ia64/kernel
parent	db2fb9db5735cc532fd4fc55e94b9a3c3750378e (diff)
parent	e1036502e5263851259d147771226161e5ccc85a (diff)
Merge ../linus
Conflicts:
        drivers/cpufreq/cpufreq.c
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile                |    1
-rw-r--r--  arch/ia64/kernel/acpi.c                  |    9
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c  |   11
-rw-r--r--  arch/ia64/kernel/crash.c                 |  245
-rw-r--r--  arch/ia64/kernel/efi.c                   |   71
-rw-r--r--  arch/ia64/kernel/entry.S                 |    2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c            |    1
-rw-r--r--  arch/ia64/kernel/iosapic.c               |   27
-rw-r--r--  arch/ia64/kernel/irq.c                   |    4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c              |   24
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c            |    2
-rw-r--r--  arch/ia64/kernel/kprobes.c               |    4
-rw-r--r--  arch/ia64/kernel/machine_kexec.c         |  133
-rw-r--r--  arch/ia64/kernel/mca.c                   |   13
-rw-r--r--  arch/ia64/kernel/mca_drv.c               |   95
-rw-r--r--  arch/ia64/kernel/pal.S                   |   58
-rw-r--r--  arch/ia64/kernel/palinfo.c               |   24
-rw-r--r--  arch/ia64/kernel/perfmon.c               |   18
-rw-r--r--  arch/ia64/kernel/perfmon_montecito.h     |   12
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S       |  334
-rw-r--r--  arch/ia64/kernel/sal.c                   |   11
-rw-r--r--  arch/ia64/kernel/salinfo.c               |    8
-rw-r--r--  arch/ia64/kernel/setup.c                 |   40
-rw-r--r--  arch/ia64/kernel/smp.c                   |   40
-rw-r--r--  arch/ia64/kernel/smpboot.c               |   12
-rw-r--r--  arch/ia64/kernel/time.c                  |    6
-rw-r--r--  arch/ia64/kernel/topology.c              |    8
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S           |    8
28 files changed, 1073 insertions(+), 148 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index cfa099b04cda..8ae384eb5357 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
 obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj-$(CONFIG_PCI_MSI)		+= msi_ia64.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 32c3abededc6..73ef4a85b861 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -64,9 +64,6 @@ EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
-unsigned char acpi_kbd_controller_present = 1;
-unsigned char acpi_legacy_devices;
-
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -628,12 +625,6 @@ static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
 
 	fadt = (struct fadt_descriptor *)fadt_header;
 
-	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
-		acpi_kbd_controller_present = 0;
-
-	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
-		acpi_legacy_devices = 1;
-
 	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
 	return 0;
 }
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 86faf221a070..088f130197ae 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@ processor_get_pstate (
 
 	dprintk("processor_get_pstate\n");
 
-	retval = ia64_pal_get_pstate(&pstate_index);
+	retval = ia64_pal_get_pstate(&pstate_index,
+			PAL_GET_PSTATE_TYPE_INSTANT);
 	*value = (u32) pstate_index;
 
 	if (retval)
@@ -91,7 +92,7 @@ extract_clock (
 	dprintk("extract_clock\n");
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
-		if (value >= data->acpi_data.states[i].control)
+		if (value == data->acpi_data.states[i].status)
 			return data->acpi_data.states[i].core_frequency;
 	}
 	return data->acpi_data.states[i-1].core_frequency;
@@ -117,11 +118,7 @@ processor_get_freq (
 		goto migrate_end;
 	}
 
-	/*
-	 * processor_get_pstate gets the average frequency since the
-	 * last get. So, do two PAL_get_freq()...
-	 */
-	ret = processor_get_pstate(&value);
+	/* processor_get_pstate gets the instantaneous frequency */
 	ret = processor_get_pstate(&value);
 
 	if (ret) {
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
new file mode 100644
index 000000000000..0aabedf95dad
--- /dev/null
+++ b/arch/ia64/kernel/crash.c
@@ -0,0 +1,245 @@
+/*
+ * arch/ia64/kernel/crash.c
+ *
+ * Architecture specific (ia64) functions for kexec based crash dumps.
+ *
+ * Created by: Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Intel Corp	Zou Nan hai <nanhai.zou@intel.com>
+ *
+ */
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/crash_dump.h>
+#include <linux/bootmem.h>
+#include <linux/kexec.h>
+#include <linux/elfcore.h>
+#include <linux/sysctl.h>
+#include <linux/init.h>
+
+#include <asm/kdebug.h>
+#include <asm/mca.h>
+#include <asm/uaccess.h>
+
+int kdump_status[NR_CPUS];
+atomic_t kdump_cpu_freezed;
+atomic_t kdump_in_progress;
+int kdump_on_init = 1;
+ssize_t
+copy_oldmem_page(unsigned long pfn, char *buf,
+		size_t csize, unsigned long offset, int userbuf)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+	vaddr = __va(pfn<<PAGE_SHIFT);
+	if (userbuf) {
+		if (copy_to_user(buf, (vaddr + offset), csize)) {
+			return -EFAULT;
+		}
+	} else
+		memcpy(buf, (vaddr + offset), csize);
+	return csize;
+}
+
+static inline Elf64_Word
+*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
+		size_t data_len)
+{
+	struct elf_note *note = (struct elf_note *)buf;
+	note->n_namesz = strlen(name) + 1;
+	note->n_descsz = data_len;
+	note->n_type   = type;
+	buf += (sizeof(*note) + 3)/4;
+	memcpy(buf, name, note->n_namesz);
+	buf += (note->n_namesz + 3)/4;
+	memcpy(buf, data, data_len);
+	buf += (data_len + 3)/4;
+	return buf;
+}
+
+static void
+final_note(void *buf)
+{
+	memset(buf, 0, sizeof(struct elf_note));
+}
+
+extern void ia64_dump_cpu_regs(void *);
+
+static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
+
+void
+crash_save_this_cpu()
+{
+	void *buf;
+	unsigned long cfm, sof, sol;
+
+	int cpu = smp_processor_id();
+	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
+
+	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
+	memset(prstatus, 0, sizeof(*prstatus));
+	prstatus->pr_pid = current->pid;
+
+	ia64_dump_cpu_regs(dst);
+	cfm = dst[43];
+	sol = (cfm >> 7) & 0x7f;
+	sof = cfm & 0x7f;
+	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
+			sof - sol);
+
+	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
+	if (!buf)
+		return;
+	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
+			sizeof(*prstatus));
+	final_note(buf);
+}
+
+static int
+kdump_wait_cpu_freeze(void)
+{
+	int cpu_num = num_online_cpus() - 1;
+	int timeout = 1000;
+	while(timeout-- > 0) {
+		if (atomic_read(&kdump_cpu_freezed) == cpu_num)
+			return 0;
+		udelay(1000);
+	}
+	return 1;
+}
+
+void
+machine_crash_shutdown(struct pt_regs *pt)
+{
+	/* This function is only called after the system
+	 * has panicked or is otherwise in a critical state.
+	 * The minimum amount of code to allow a kexec'd kernel
+	 * to run successfully needs to happen here.
+	 *
+	 * In practice this means shooting down the other cpus in
+	 * an SMP system.
+	 */
+	kexec_disable_iosapic();
+#ifdef CONFIG_SMP
+	kdump_smp_send_stop();
+	if (kdump_wait_cpu_freeze() && kdump_on_init) {
+		/* not all cpus responded to the IPI; send INIT to freeze them */
+		kdump_smp_send_init();
+	}
+#endif
+}
+
+static void
+machine_kdump_on_init(void)
+{
+	local_irq_disable();
+	kexec_disable_iosapic();
+	machine_kexec(ia64_kimage);
+}
+
+void
+kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
+{
+	int cpuid;
+	local_irq_disable();
+	cpuid = smp_processor_id();
+	crash_save_this_cpu();
+	current->thread.ksp = (__u64)info->sw - 16;
+	atomic_inc(&kdump_cpu_freezed);
+	kdump_status[cpuid] = 1;
+	mb();
+	if (cpuid == 0) {
+		for (;;)
+			cpu_relax();
+	} else
+		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+}
+
+static int
+kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
+{
+	struct ia64_mca_notify_die *nd;
+	struct die_args *args = data;
+
+	if (!kdump_on_init)
+		return NOTIFY_DONE;
+
+	if (val != DIE_INIT_MONARCH_ENTER &&
+	    val != DIE_INIT_SLAVE_ENTER &&
+	    val != DIE_MCA_RENDZVOUS_LEAVE &&
+	    val != DIE_MCA_MONARCH_LEAVE)
+		return NOTIFY_DONE;
+
+	nd = (struct ia64_mca_notify_die *)args->err;
+	/* Reason code 1 means machine check rendezvous */
+	if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) &&
+	    nd->sos->rv_rc == 1)
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_INIT_MONARCH_ENTER:
+		machine_kdump_on_init();
+		break;
+	case DIE_INIT_SLAVE_ENTER:
+		unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+	case DIE_MCA_RENDZVOUS_LEAVE:
+		if (atomic_read(&kdump_in_progress))
+			unw_init_running(kdump_cpu_freeze, NULL);
+		break;
+	case DIE_MCA_MONARCH_LEAVE:
+		/* die_register->signr indicates if MCA is recoverable */
+		if (!args->signr)
+			machine_kdump_on_init();
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+#ifdef CONFIG_SYSCTL
+static ctl_table kdump_on_init_table[] = {
+	{
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "kdump_on_init",
+		.data = &kdump_on_init,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec,
+	},
+	{ .ctl_name = 0 }
+};
+
+static ctl_table sys_table[] = {
+	{
+		.ctl_name = CTL_KERN,
+		.procname = "kernel",
+		.mode = 0555,
+		.child = kdump_on_init_table,
+	},
+	{ .ctl_name = 0 }
+};
+#endif
+
+static int
+machine_crash_setup(void)
+{
+	char *from = strstr(saved_command_line, "elfcorehdr=");
+	static struct notifier_block kdump_init_notifier_nb = {
+		.notifier_call = kdump_init_notifier,
+	};
+	int ret;
+	if (from)
+		elfcorehdr_addr = memparse(from+11, &from);
+	saved_max_pfn = (unsigned long)-1;
+	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
+		return ret;
+#ifdef CONFIG_SYSCTL
+	register_sysctl_table(sys_table, 0);
+#endif
+	return 0;
+}
+
+__initcall(machine_crash_setup);
+
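A note on the ELF note layout used by append_elf_note() above: each note is a
12-byte header (namesz, descsz, type), followed by the NUL-terminated name and
the payload, with each piece padded out to whole 4-byte words. A minimal
userspace sketch of that rounding follows; the helper name and the payload size
are illustrative, not kernel API:

    #include <stdio.h>
    #include <string.h>

    /* Round a byte count up to whole 4-byte ELF note words. */
    static size_t note_words(size_t bytes)
    {
            return (bytes + 3) / 4;
    }

    int main(void)
    {
            const char name[] = "CORE";          /* n_namesz = 5 (incl. NUL)  */
            size_t hdr_words  = note_words(12);  /* 3 x 4-byte header words   */
            size_t name_words = note_words(sizeof(name));
            size_t desc_words = note_words(336); /* e.g. a prstatus payload   */

            printf("note occupies %zu words\n",
                   hdr_words + name_words + desc_words);
            return 0;
    }

This mirrors the "buf += (... + 3)/4" arithmetic in the patch: the next note
always starts on a word boundary, which is what crash-dump readers expect.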
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index bb8770a177b5..0b25a7d4e1e4 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/time.h>
 #include <linux/efi.h>
+#include <linux/kexec.h>
 
 #include <asm/io.h>
 #include <asm/kregs.h>
@@ -41,7 +42,7 @@ extern efi_status_t efi_call_phys (void *, ...);
 struct efi efi;
 EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL;
+static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
 #define efi_call_virt(f, args...)	(*(f))(args)
 
@@ -224,7 +225,7 @@ efi_gettimeofday (struct timespec *ts)
 }
 
 static int
-is_available_memory (efi_memory_desc_t *md)
+is_memory_available (efi_memory_desc_t *md)
 {
 	if (!(md->attribute & EFI_MEMORY_WB))
 		return 0;
@@ -421,6 +422,8 @@ efi_init (void)
 			mem_limit = memparse(cp + 4, &cp);
 		} else if (memcmp(cp, "max_addr=", 9) == 0) {
 			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
+		} else if (memcmp(cp, "min_addr=", 9) == 0) {
+			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
 		} else {
 			while (*cp != ' ' && *cp)
 				++cp;
@@ -428,6 +431,8 @@ efi_init (void)
 				++cp;
 		}
 	}
+	if (min_addr != 0UL)
+		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
 	if (max_addr != ~0UL)
 		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
 
@@ -887,14 +892,15 @@ find_memmap_space (void)
 		}
 		contig_high = GRANULEROUNDDOWN(contig_high);
 	}
-	if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
+	if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
 		continue;
 
 	/* Round ends inward to granule boundaries */
 	as = max(contig_low, md->phys_addr);
 	ae = min(contig_high, efi_md_end(md));
 
-	/* keep within max_addr= command line arg */
+	/* keep within max_addr= and min_addr= command line arg */
+	as = max(as, min_addr);
 	ae = min(ae, max_addr);
 	if (ae <= as)
 		continue;
@@ -962,7 +968,7 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 		}
 		contig_high = GRANULEROUNDDOWN(contig_high);
 	}
-	if (!is_available_memory(md))
+	if (!is_memory_available(md))
 		continue;
 
 	/*
@@ -1004,7 +1010,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
 	} else
 		ae = efi_md_end(md);
 
-	/* keep within max_addr= command line arg */
+	/* keep within max_addr= and min_addr= command line arg */
+	as = max(as, min_addr);
 	ae = min(ae, max_addr);
 	if (ae <= as)
 		continue;
@@ -1116,6 +1123,58 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 		 */
 		insert_resource(res, code_resource);
 		insert_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+		insert_resource(res, &efi_memmap_res);
+		insert_resource(res, &boot_param_res);
+		if (crashk_res.end > crashk_res.start)
+			insert_resource(res, &crashk_res);
+#endif
 		}
 	}
 }
+
+#ifdef CONFIG_KEXEC
+/* find a block of memory aligned to 64M, excluding reserved regions;
+   rsvd_regions are sorted
+ */
+unsigned long
+kdump_find_rsvd_region (unsigned long size,
+		struct rsvd_region *r, int n)
+{
+	int i;
+	u64 start, end;
+	u64 alignment = 1UL << _PAGE_SIZE_64M;
+	void *efi_map_start, *efi_map_end, *p;
+	efi_memory_desc_t *md;
+	u64 efi_desc_size;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+		md = p;
+		if (!efi_wb(md))
+			continue;
+		start = ALIGN(md->phys_addr, alignment);
+		end = efi_md_end(md);
+		for (i = 0; i < n; i++) {
+			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
+				if (__pa(r[i].start) > start + size)
+					return start;
+				start = ALIGN(__pa(r[i].end), alignment);
+				if (i < n-1 && __pa(r[i+1].start) < start + size)
+					continue;
+				else
+					break;
+			}
+		}
+		if (end > start + size)
+			return start;
+	}
+
+	printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n",
+	       size);
+	return ~0UL;
+}
+#endif
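The min_addr=/max_addr= handling added to efi.c above is plain interval
clamping: each candidate memory range is intersected with [min_addr, max_addr]
and dropped when the intersection is empty. A small standalone illustration
(the helper name and the sample values are hypothetical):

    #include <stdio.h>

    /* Clamp [*as, *ae) to [lo, hi); return 0 if nothing is left. */
    static int clamp_range(unsigned long *as, unsigned long *ae,
                           unsigned long lo, unsigned long hi)
    {
            if (*as < lo)
                    *as = lo;
            if (*ae > hi)
                    *ae = hi;
            return *ae > *as;
    }

    int main(void)
    {
            unsigned long as = 1UL << 20, ae = 64UL << 20;

            /* mimic "min_addr=32M max_addr=48M" on a 1M..64M descriptor */
            if (clamp_range(&as, &ae, 32UL << 20, 48UL << 20))
                    printf("usable: %#lx-%#lx\n", as, ae);
            return 0;
    }

In the patch, the "as = max(as, min_addr); ae = min(ae, max_addr);" pair plays
the role of clamp_range(), and "if (ae <= as) continue;" is the empty check.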
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3390b7c5a63f..15234ed3a341 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1575,7 +1575,7 @@ sys_call_table:
 	data8 sys_mq_timedreceive		// 1265
 	data8 sys_mq_notify
 	data8 sys_mq_getsetattr
-	data8 sys_ni_syscall			// reserved for kexec_load
+	data8 sys_kexec_load
 	data8 sys_ni_syscall			// reserved for vserver
 	data8 sys_waitid			// 1270
 	data8 sys_add_key
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 879c1817bd1c..bd17190bebb6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -14,6 +14,7 @@ EXPORT_SYMBOL(strlen);
 
 #include <asm/checksum.h>
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
+EXPORT_SYMBOL(csum_ipv6_magic);
 
 #include <asm/semaphore.h>
 EXPORT_SYMBOL(__down);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 9bf15fefa7e4..0fc5fb7865cf 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -288,6 +288,27 @@ nop (unsigned int irq)
 	/* do nothing... */
 }
 
+
+#ifdef CONFIG_KEXEC
+void
+kexec_disable_iosapic(void)
+{
+	struct iosapic_intr_info *info;
+	struct iosapic_rte_info *rte;
+	u8 vec = 0;
+	for (info = iosapic_intr_info; info <
+			iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
+		list_for_each_entry(rte, &info->rtes,
+				rte_list) {
+			iosapic_write(rte->addr,
+					IOSAPIC_RTE_LOW(rte->rte_index),
+					IOSAPIC_MASK|vec);
+			iosapic_eoi(rte->addr, vec);
+		}
+	}
+}
+#endif
+
 static void
 mask_irq (unsigned int irq)
 {
@@ -426,7 +447,7 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_level = {
-	.typename =	"IO-SAPIC-level",
+	.name =		"IO-SAPIC-level",
 	.startup =	iosapic_startup_level_irq,
 	.shutdown =	iosapic_shutdown_level_irq,
 	.enable =	iosapic_enable_level_irq,
@@ -473,7 +494,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 #define iosapic_end_edge_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_edge = {
-	.typename =	"IO-SAPIC-edge",
+	.name =		"IO-SAPIC-edge",
 	.startup =	iosapic_startup_edge_irq,
 	.shutdown =	iosapic_disable_edge_irq,
 	.enable =	iosapic_enable_edge_irq,
@@ -664,7 +685,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 		printk(KERN_WARNING
 		       "%s: changing vector %d from %s to %s\n",
 		       __FUNCTION__, vector,
-		       idesc->chip->typename, irq_type->typename);
+		       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
 	return 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f07c0864b0b4..54d55e4d64f7 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 	}
 #endif
-	seq_printf(p, " %14s", irq_desc[i].chip->typename);
+	seq_printf(p, " %14s", irq_desc[i].chip->name);
 	seq_printf(p, " %s", action->name);
 
 	for (action=action->next; action; action = action->next)
@@ -197,7 +197,7 @@ void fixup_irqs(void)
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
 			vectors_in_migration[irq]=0;
-			__do_IRQ(irq);
+			generic_handle_irq(irq);
 			set_irq_regs(old_regs);
 		}
 	}
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 68339dd0c9e2..ba3ba8bc50be 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -180,11 +180,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (!IS_RESCHEDULE(vector)) {
+		if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
+		else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
 
-			__do_IRQ(local_vector_to_irq(vector));
+			generic_handle_irq(local_vector_to_irq(vector));
 
 			/*
 			 * Disable interrupts and send EOI:
@@ -225,7 +227,9 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (!IS_RESCHEDULE(vector)) {
+		if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
+		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
@@ -238,7 +242,7 @@ void ia64_process_pending_intr(void)
 			 * Probably could shared code.
 			 */
 			vectors_in_migration[local_vector_to_irq(vector)]=0;
-			__do_IRQ(local_vector_to_irq(vector));
+			generic_handle_irq(local_vector_to_irq(vector));
 			set_irq_regs(old_regs);
 
 			/*
@@ -258,11 +262,22 @@ void ia64_process_pending_intr(void)
 #ifdef CONFIG_SMP
 extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
+static irqreturn_t dummy_handler (int irq, void *dev_id)
+{
+	BUG();
+}
+
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
 	.flags =	IRQF_DISABLED,
 	.name =		"IPI"
 };
+
+static struct irqaction resched_irqaction = {
+	.handler =	dummy_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"resched"
+};
 #endif
 
 void
@@ -287,6 +302,7 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index 1ab58b09f3d7..c2f07beb1759 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -34,7 +34,7 @@ static int lsapic_retrigger(unsigned int irq)
 }
 
 struct hw_interrupt_type irq_type_ia64_lsapic = {
-	.typename =	"LSAPIC",
+	.name =		"LSAPIC",
 	.startup =	lsapic_noop_startup,
 	.shutdown =	lsapic_noop,
 	.enable =	lsapic_noop,
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d63285e..76e778951e20 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
+	free_insn_slot(p->ainsn.insn, 0);
 	mutex_unlock(&kprobe_mutex);
 }
 /*
@@ -851,7 +851,7 @@ static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
 			return;
 		}
 	} while (unw_unwind(info) >= 0);
-	lp->bsp = 0;
+	lp->bsp = NULL;
 	lp->cfm = 0;
 	return;
 }
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
new file mode 100644
index 000000000000..468233fa2cee
--- /dev/null
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -0,0 +1,133 @@
+/*
+ * arch/ia64/kernel/machine_kexec.c
+ *
+ * Handle transition of Linux booting another kernel
+ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
+ * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2.  See the file COPYING for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+#include <asm/delay.h>
+#include <asm/meminit.h>
+
+typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
+		struct ia64_boot_param *, unsigned long);
+
+struct kimage *ia64_kimage;
+
+struct resource efi_memmap_res = {
+	.name  = "EFI Memory Map",
+	.start = 0,
+	.end   = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+struct resource boot_param_res = {
+	.name  = "Boot parameter",
+	.start = 0,
+	.end   = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+
+/*
+ * Do whatever setup is needed on the image and the
+ * reboot code buffer to allow us to avoid allocations
+ * later.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+	void *control_code_buffer;
+	const unsigned long *func;
+
+	func = (unsigned long *)&relocate_new_kernel;
+	/* Pre-load control code buffer to minimize work in kexec path */
+	control_code_buffer = page_address(image->control_code_page);
+	memcpy((void *)control_code_buffer, (const void *)func[0],
+			relocate_new_kernel_size);
+	flush_icache_range((unsigned long)control_code_buffer,
+			(unsigned long)control_code_buffer + relocate_new_kernel_size);
+	ia64_kimage = image;
+
+	return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+void machine_shutdown(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			cpu_down(cpu);
+	}
+	kexec_disable_iosapic();
+}
+
+/*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+extern void *efi_get_pal_addr(void);
+static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
+{
+	struct kimage *image = arg;
+	relocate_new_kernel_t rnk;
+	void *pal_addr = efi_get_pal_addr();
+	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
+	unsigned long vector;
+	int ii;
+
+	if (image->type == KEXEC_TYPE_CRASH) {
+		crash_save_this_cpu();
+		current->thread.ksp = (__u64)info->sw - 16;
+	}
+
+	/* Interrupts aren't acceptable while we reboot */
+	local_irq_disable();
+
+	/* Mask CMC and Performance Monitor interrupts */
+	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
+	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
+
+	/* Mask ITV and Local Redirect Registers */
+	ia64_set_itv(1 << 16);
+	ia64_set_lrr0(1 << 16);
+	ia64_set_lrr1(1 << 16);
+
+	/* terminate possible nested in-service interrupts */
+	for (ii = 0; ii < 16; ii++)
+		ia64_eoi();
+
+	/* unmask TPR and clear any pending interrupts */
+	ia64_setreg(_IA64_REG_CR_TPR, 0);
+	ia64_srlz_d();
+	vector = ia64_get_ivr();
+	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		ia64_eoi();
+		vector = ia64_get_ivr();
+	}
+	platform_kernel_launch_event();
+	rnk = (relocate_new_kernel_t)&code_addr;
+	(*rnk)(image->head, image->start, ia64_boot_param,
+			GRANULEROUNDDOWN((unsigned long) pal_addr));
+	BUG();
+}
+
+void machine_kexec(struct kimage *image)
+{
+	unw_init_running(ia64_machine_kexec, image);
+	for(;;);
+}
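The "rnk = (relocate_new_kernel_t)&code_addr;" cast above relies on the ia64
calling convention: a C function pointer refers to a { entry address, gp }
descriptor, not to the first instruction, so the address of a variable holding
the entry point can stand in for a descriptor as long as the target (here,
position-independent code) never reads gp. An illustrative userspace sketch of
that descriptor view (struct and values here are made up, not kernel API):

    #include <stdio.h>

    /* On ia64, function pointers refer to a descriptor like this. */
    struct fdesc_sketch {
            unsigned long ip;   /* entry address                 */
            unsigned long gp;   /* global pointer (unused by PIC) */
    };

    int main(void)
    {
            unsigned long code_addr = 0x4000000000001000UL; /* made-up entry */
            struct fdesc_sketch *fd = (struct fdesc_sketch *)&code_addr;

            /* Reading fd->ip is the lookup a br.call performs through a
             * descriptor; gp would be whatever happens to follow code_addr,
             * which is why this trick is only safe for gp-free PIC code. */
            printf("entry = %#lx\n", fd->ip);
            return 0;
    }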
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..87c1c4f42872 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -82,6 +82,7 @@
 #include <asm/system.h>
 #include <asm/sal.h>
 #include <asm/mca.h>
+#include <asm/kexec.h>
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
@@ -678,7 +679,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +691,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1238,6 +1239,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
+#ifdef CONFIG_CRASH_DUMP
+		atomic_set(&kdump_in_progress, 1);
+		monarch_cpu = -1;
+#endif
 	}
 	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
 			== NOTIFY_STOP)
@@ -1247,8 +1252,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index a45009d2bc90..afc1403799c9 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -435,6 +435,50 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 }
 
 /**
+ * get_target_identifier - Get the valid Cache or Bus check target identifier.
+ * @peidx:	pointer of index of processor error section
+ *
+ * Return value:
+ *	target address on Success / 0 on Failure
+ */
+static u64
+get_target_identifier(peidx_table_t *peidx)
+{
+	u64 target_address = 0;
+	sal_log_mod_error_info_t *smei;
+	pal_cache_check_info_t *pcci;
+	int i, level = 9;
+
+	/*
+	 * Look through the cache checks for a valid target identifier
+	 * If more than one valid target identifier, return the one
+	 * with the lowest cache level.
+	 */
+	for (i = 0; i < peidx_cache_check_num(peidx); i++) {
+		smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
+		if (smei->valid.target_identifier && smei->target_identifier) {
+			pcci = (pal_cache_check_info_t *)&(smei->check_info);
+			if (!target_address || (pcci->level < level)) {
+				target_address = smei->target_identifier;
+				level = pcci->level;
+				continue;
+			}
+		}
+	}
+	if (target_address)
+		return target_address;
+
+	/*
+	 * Look at the bus check for a valid target identifier
+	 */
+	smei = peidx_bus_check(peidx, 0);
+	if (smei && smei->valid.target_identifier)
+		return smei->target_identifier;
+
+	return 0;
+}
+
+/**
  * recover_from_read_error - Try to recover the errors which type are "read"s.
  * @slidx:	pointer of index of SAL error record
  * @peidx:	pointer of index of processor error section
@@ -450,13 +494,14 @@ recover_from_read_error(slidx_table_t *slidx,
 		peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 		struct ia64_sal_os_state *sos)
 {
-	sal_log_mod_error_info_t *smei;
+	u64 target_identifier;
 	pal_min_state_area_t *pmsa;
 	struct ia64_psr *psr1, *psr2;
 	ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
 
 	/* Is target address valid? */
-	if (!pbci->tv)
+	target_identifier = get_target_identifier(peidx);
+	if (!target_identifier)
 		return fatal_mca("target address not valid");
 
 	/*
@@ -487,32 +532,28 @@ recover_from_read_error(slidx_table_t *slidx,
 	pmsa = sos->pal_min_state;
 	if (psr1->cpl != 0 ||
 	   ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
-		smei = peidx_bus_check(peidx, 0);
-		if (smei->valid.target_identifier) {
-			/*
-			 * setup for resume to bottom half of MCA,
-			 * "mca_handler_bhhook"
-			 */
-			/* pass to bhhook as argument (gr8, ...) */
-			pmsa->pmsa_gr[8-1] = smei->target_identifier;
-			pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
-			pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
-			/* set interrupted return address (but no use) */
-			pmsa->pmsa_br0 = pmsa->pmsa_iip;
-			/* change resume address to bottom half */
-			pmsa->pmsa_iip = mca_hdlr_bh->fp;
-			pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
-			/* set cpl with kernel mode */
-			psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
-			psr2->cpl = 0;
-			psr2->ri = 0;
-			psr2->bn = 1;
-			psr2->i = 0;
-
-			return mca_recovered("user memory corruption. "
+		/*
+		 * setup for resume to bottom half of MCA,
+		 * "mca_handler_bhhook"
+		 */
+		/* pass to bhhook as argument (gr8, ...) */
+		pmsa->pmsa_gr[8-1] = target_identifier;
+		pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+		pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
+		/* set interrupted return address (but no use) */
+		pmsa->pmsa_br0 = pmsa->pmsa_iip;
+		/* change resume address to bottom half */
+		pmsa->pmsa_iip = mca_hdlr_bh->fp;
+		pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
+		/* set cpl with kernel mode */
+		psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
+		psr2->cpl = 0;
+		psr2->ri = 0;
+		psr2->bn = 1;
+		psr2->i = 0;
+
+		return mca_recovered("user memory corruption. "
 				"kill affected process - recovered.");
-		}
-
 	}
 
 	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index ebaf1e685f5e..0b533441c3c9 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -21,11 +21,12 @@ pal_entry_point:
 	.text
 
 /*
- * Set the PAL entry point address.  This could be written in C code, but we do it here
- * to keep it all in one module (besides, it's so trivial that it's
+ * Set the PAL entry point address.  This could be written in C code, but we
+ * do it here to keep it all in one module (besides, it's so trivial that it's
  * not a big deal).
  *
- * in0		Address of the PAL entry point (text address, NOT a function descriptor).
+ * in0		Address of the PAL entry point (text address, NOT a function
+ *		descriptor).
  */
 GLOBAL_ENTRY(ia64_pal_handler_init)
 	alloc r3=ar.pfs,1,0,0,0
@@ -36,9 +37,9 @@ GLOBAL_ENTRY(ia64_pal_handler_init)
 END(ia64_pal_handler_init)
 
 /*
- * Default PAL call handler.  This needs to be coded in assembly because it uses
- * the static calling convention, i.e., the RSE may not be used and calls are
- * done via "br.cond" (not "br.call").
+ * Default PAL call handler.  This needs to be coded in assembly because it
+ * uses the static calling convention, i.e., the RSE may not be used and
+ * calls are done via "br.cond" (not "br.call").
  */
 GLOBAL_ENTRY(ia64_pal_default_handler)
 	mov r8=-1
@@ -50,12 +51,10 @@ END(ia64_pal_default_handler)
  *
  * in0		Index of PAL service
  * in1 - in3	Remaining PAL arguments
- * in4		1 ==> clear psr.ic,  0 ==> don't clear psr.ic
- *
  */
 GLOBAL_ENTRY(ia64_pal_call_static)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-	alloc loc1 = ar.pfs,5,5,0,0
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
+	alloc loc1 = ar.pfs,4,5,0,0
 	movl loc2 = pal_entry_point
 1:	{
 	  mov r28 = in0
@@ -64,7 +63,6 @@ GLOBAL_ENTRY(ia64_pal_call_static)
 	}
 	;;
 	ld8 loc2 = [loc2]		// loc2 <- entry point
-	tbit.nz p6,p7 = in4, 0
 	adds r8 = 1f-1b,r8
 	mov loc4=ar.rsc			// save RSE configuration
 	;;
@@ -74,13 +72,11 @@ GLOBAL_ENTRY(ia64_pal_call_static)
 	.body
 	mov r30 = in2
 
-(p6)	rsm psr.i | psr.ic
 	mov r31 = in3
 	mov b7 = loc2
 
-(p7)	rsm psr.i
+	rsm psr.i
 	;;
-(p6)	srlz.i
 	mov rp = r8
 	br.cond.sptk.many b7
 1:	mov psr.l = loc3
@@ -96,8 +92,8 @@ END(ia64_pal_call_static)
  * Make a PAL call using the stacked registers calling convention.
  *
  * Inputs:
- * in0	       Index of PAL service
- * in2 - in3   Remaning PAL arguments
+ * in0		Index of PAL service
+ * in2 - in3	Remaining PAL arguments
  */
 GLOBAL_ENTRY(ia64_pal_call_stacked)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
@@ -131,18 +127,18 @@ END(ia64_pal_call_stacked)
  * Make a physical mode PAL call using the static registers calling convention.
  *
  * Inputs:
- * in0	       Index of PAL service
- * in2 - in3   Remaning PAL arguments
+ * in0		Index of PAL service
+ * in2 - in3	Remaining PAL arguments
  *
  * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
  * So we don't need to clear them.
  */
-#define PAL_PSR_BITS_TO_CLEAR						      \
-	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
-	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	      \
+#define PAL_PSR_BITS_TO_CLEAR						\
+	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\
+	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
 	 IA64_PSR_DFL | IA64_PSR_DFH)
 
-#define PAL_PSR_BITS_TO_SET						      \
+#define PAL_PSR_BITS_TO_SET						\
 	(IA64_PSR_BN)
 
 
@@ -178,7 +174,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
 	;;
 	andcm r16=loc3,r16		// removes bits to clear from psr
 	br.call.sptk.many rp=ia64_switch_mode_phys
-.ret1:	mov rp = r8			// install return address (physical)
+	mov rp = r8			// install return address (physical)
 	mov loc5 = r19
 	mov loc6 = r20
 	br.cond.sptk.many b7
@@ -188,7 +184,6 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
 	mov r19=loc5
 	mov r20=loc6
 	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
-.ret2:
 	mov psr.l = loc3		// restore init PSR
 
 	mov ar.pfs = loc1
@@ -203,8 +198,8 @@ END(ia64_pal_call_phys_static)
  * Make a PAL call using the stacked registers in physical mode.
  *
  * Inputs:
- * in0	       Index of PAL service
- * in2 - in3   Remaning PAL arguments
+ * in0		Index of PAL service
+ * in2 - in3	Remaining PAL arguments
  */
 GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
@@ -212,7 +207,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	movl loc2 = pal_entry_point
 1:	{
 	  mov r28 = in0		// copy procedure index
-	  mov loc0 = rp		// save rp
+	  mov loc0 = rp			// save rp
 	}
 	.body
 	;;
@@ -245,7 +240,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	mov r16=loc3			// r16= original psr
 	mov r19=loc5
 	mov r20=loc6
-	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
+	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
 
 	mov psr.l  = loc3		// restore init PSR
 	mov ar.pfs = loc1
@@ -257,10 +252,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 END(ia64_pal_call_phys_stacked)
 
 /*
- * Save scratch fp scratch regs which aren't saved in pt_regs already (fp10-fp15).
+ * Save scratch fp scratch regs which aren't saved in pt_regs already
+ * (fp10-fp15).
  *
- * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
- * regs fp-low partition.
+ * NOTE: We need to do this since firmware (SAL and PAL) may use any of the
+ * scratch regs fp-low partition.
  *
  * Inputs:
  *      in0	Address of stack storage for fp regs
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2b36ac..a71df9ae0397 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -16,6 +16,7 @@
  * 02/05/2001	S.Eranian	fixed module support
  * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
  * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
+ * 10/26/2006	Russ Anderson	updated processor features to rev 2.2 spec
  */
 #include <linux/types.h>
 #include <linux/errno.h>
@@ -314,13 +315,20 @@ vm_info(char *page)
 			     "Protection Key Registers(PKR)  : %d\n"
 			     "Implemented bits in PKR.key    : %d\n"
 			     "Hash Tag ID                    : 0x%x\n"
-			     "Size of RR.rid                 : %d\n",
+			     "Size of RR.rid                 : %d\n"
+			     "Max Purges                     : ",
 			     vm_info_1.pal_vm_info_1_s.phys_add_size,
 			     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
 			     vm_info_1.pal_vm_info_1_s.max_pkr+1,
 			     vm_info_1.pal_vm_info_1_s.key_size,
 			     vm_info_1.pal_vm_info_1_s.hash_tag_id,
 			     vm_info_2.pal_vm_info_2_s.rid_size);
+		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
+			p += sprintf(p, "unlimited\n");
+		else
+			p += sprintf(p, "%d\n",
+				vm_info_2.pal_vm_info_2_s.max_purges ?
+				vm_info_2.pal_vm_info_2_s.max_purges : 1);
 	}
 
 	if (ia64_pal_mem_attrib(&attrib) == 0) {
@@ -467,7 +475,11 @@ static const char *proc_features[]={
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
 	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
 	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
-	NULL,NULL,NULL,NULL,NULL,
+	"Unimplemented instruction address fault",
+	"INIT, PMI, and LINT pins",
+	"Simple unimplemented instr addresses",
+	"Variable P-state performance",
+	"Virtual machine features implemented",
 	"XIP,XPSR,XFS implemented",
 	"XR1-XR3 implemented",
 	"Disable dynamic predicate prediction",
@@ -475,7 +487,11 @@ static const char *proc_features[]={
 	"Disable dynamic data cache prefetch",
 	"Disable dynamic inst cache prefetch",
 	"Disable dynamic branch prediction",
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+	NULL, NULL, NULL, NULL,
+	"Disable P-states",
+	"Enable MCA on Data Poisoning",
+	"Enable vmsw instruction",
+	"Enable extern environmental notification",
 	"Disable BINIT on processor time-out",
 	"Disable dynamic power management (DPM)",
 	"Disable coherency",
@@ -952,7 +968,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -974,7 +989,6 @@ static struct notifier_block palinfo_cpu_notifier =
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 281004ff7b00..aa94f60fa8e7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -853,9 +853,8 @@ pfm_context_alloc(void)
 	 * allocate context descriptor
 	 * must be able to free with interrupts disabled
 	 */
-	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
+	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
 	if (ctx) {
-		memset(ctx, 0, sizeof(pfm_context_t));
 		DPRINT(("alloc ctx @%p\n", ctx));
 	}
 	return ctx;
@@ -2189,13 +2188,13 @@ pfm_alloc_fd(struct file **cfile)
 	/*
 	 * allocate a new dcache entry
 	 */
-	file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
-	if (!file->f_dentry) goto out;
+	file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+	if (!file->f_path.dentry) goto out;
 
-	file->f_dentry->d_op = &pfmfs_dentry_operations;
+	file->f_path.dentry->d_op = &pfmfs_dentry_operations;
 
-	d_add(file->f_dentry, inode);
-	file->f_vfsmnt = mntget(pfmfs_mnt);
+	d_add(file->f_path.dentry, inode);
+	file->f_path.mnt = mntget(pfmfs_mnt);
 	file->f_mapping = inode->i_mapping;
 
 	file->f_op = &pfm_file_ops;
@@ -2302,7 +2301,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
@@ -5558,12 +5557,13 @@ report_spurious2:
 }
 
 static irqreturn_t
-pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_interrupt_handler(int irq, void *arg)
 {
 	unsigned long start_cycles, total_cycles;
 	unsigned long min, max;
 	int this_cpu;
 	int ret;
+	struct pt_regs *regs = get_irq_regs();
 
 	this_cpu = get_cpu();
 	if (likely(!pfm_alt_intr_handler)) {
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
index cd06ac6a686c..7f8da4c7ca67 100644
--- a/arch/ia64/kernel/perfmon_montecito.h
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -45,16 +45,16 @@ static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
 /* pmc29 */ { PFM_REG_NOTIMPL, },
 /* pmc30 */ { PFM_REG_NOTIMPL, },
 /* pmc31 */ { PFM_REG_NOTIMPL, },
-/* pmc32 */ { PFM_REG_CONFIG,  0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc33 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc34 */ { PFM_REG_CONFIG,  0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
-/* pmc35 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc32 */ { PFM_REG_CONFIG,  0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc33 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc34 */ { PFM_REG_CONFIG,  0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc35 */ { PFM_REG_CONFIG,  0, 0x0,  0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc36 */ { PFM_REG_CONFIG,  0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
 /* pmc38 */ { PFM_REG_CONFIG,  0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
 /* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
-/* pmc41 */ { PFM_REG_CONFIG,  0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
+/* pmc41 */ { PFM_REG_CONFIG,  0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
 /* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
 	    { PFM_REG_END    , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
 };
@@ -185,7 +185,7 @@ pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cn
 	DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
 
 	if (cnum == 41 && is_loaded
-	    && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
+	    && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
 
 		DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
 
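The perfmon_montecito.h hunks above only append UL suffixes to wide hex
constants. In C, an unsuffixed hex constant takes the first of int, unsigned
int, long, unsigned long (and, in C99, the long long pair) that can represent
it, so on an LP64 target like ia64 the value is unchanged; the suffix just
makes the 64-bit unsigned type explicit and keeps some compilers quiet. A
tiny sketch of that equivalence (values reused from the hunk, otherwise
illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned long a = 0x30f01ffffffffff;    /* type chosen by rules */
            unsigned long b = 0x30f01ffffffffffUL;  /* type stated outright */

            printf("%d\n", a == b);  /* prints 1 on an LP64 target */
            return 0;
    }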
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..ae473e3f2a0d
--- /dev/null
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -0,0 +1,334 @@
1/*
2 * arch/ia64/kernel/relocate_kernel.S
3 *
4 * Relocate kexec'able kernel and start it
5 *
6 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
7 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
8 * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
9 *
10 * This source code is licensed under the GNU General Public License,
11 * Version 2. See the file COPYING for more details.
12 */
13#include <asm/asmmacro.h>
14#include <asm/kregs.h>
15#include <asm/page.h>
16#include <asm/pgtable.h>
17#include <asm/mca_asm.h>
18
19 /* Must be relocatable PIC code callable as a C function
20 */
21GLOBAL_ENTRY(relocate_new_kernel)
22 .prologue
23 alloc r31=ar.pfs,4,0,0,0
24 .body
25.reloc_entry:
26{
27 rsm psr.i | psr.ic
28 mov r2=ip
29}
30 ;;
31{
32 flushrs // must be first insn in group
33 srlz.i
34}
35 ;;
36 dep r2=0,r2,61,3 //to physical address
37 ;;
38 //first switch to physical mode
39 add r3=1f-.reloc_entry, r2
40 movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
41 mov ar.rsc=0 // put RSE in enforced lazy mode
42 ;;
43 add sp=(memory_stack_end - 16 - .reloc_entry),r2
44 add r8=(register_stack - .reloc_entry),r2
45 ;;
46 mov r18=ar.rnat
47 mov ar.bspstore=r8
48 ;;
49 mov cr.ipsr=r16
50 mov cr.iip=r3
51 mov cr.ifs=r0
52 srlz.i
53 ;;
54 mov ar.rnat=r18
55 rfi
56 ;;
571:
58 //physical mode code begin
59 mov b6=in1
60 dep r28=0,in2,61,3 //to physical address
61
62 // purge all TC entries
63#define O(member) IA64_CPUINFO_##member##_OFFSET
64 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
65 ;;
66 addl r17=O(PTCE_STRIDE),r2
67 addl r2=O(PTCE_BASE),r2
68 ;;
69 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
70 ld4 r19=[r2],4 // r19=ptce_count[0]
71 ld4 r21=[r17],4 // r21=ptce_stride[0]
72 ;;
73 ld4 r20=[r2] // r20=ptce_count[1]
74 ld4 r22=[r17] // r22=ptce_stride[1]
75 mov r24=r0
76 ;;
77 adds r20=-1,r20
78 ;;
79#undef O
802:
81 cmp.ltu p6,p7=r24,r19
82(p7) br.cond.dpnt.few 4f
83 mov ar.lc=r20
843:
85 ptc.e r18
86 ;;
87 add r18=r22,r18
88 br.cloop.sptk.few 3b
89 ;;
90 add r18=r21,r18
91 add r24=1,r24
92 ;;
93 br.sptk.few 2b
944:
95 srlz.i
96 ;;
97 //purge TR entry for kernel text and data
98 movl r16=KERNEL_START
99 mov r18=KERNEL_TR_PAGE_SHIFT<<2
100 ;;
101 ptr.i r16, r18
102 ptr.d r16, r18
103 ;;
104 srlz.i
105 ;;
106
107 // purge TR entry for percpu data
108 movl r16=PERCPU_ADDR
109 mov r18=PERCPU_PAGE_SHIFT<<2
110 ;;
111 ptr.d r16,r18
112 ;;
113 srlz.d
114 ;;
115
116 // purge TR entry for pal code
117 mov r16=in3
118 mov r18=IA64_GRANULE_SHIFT<<2
119 ;;
120 ptr.i r16,r18
121 ;;
122 srlz.i
123 ;;
124
125 // purge TR entry for stack
126 mov r16=IA64_KR(CURRENT_STACK)
127 ;;
128 shl r16=r16,IA64_GRANULE_SHIFT
129 movl r19=PAGE_OFFSET
130 ;;
131 add r16=r19,r16
132 mov r18=IA64_GRANULE_SHIFT<<2
133 ;;
134 ptr.d r16,r18
135 ;;
136 srlz.i
137 ;;
138
139 //copy segments
140 movl r16=PAGE_MASK
141 mov r30=in0 // in0 is page_list
142 br.sptk.few .dest_page
143 ;;
144.loop:
145 ld8 r30=[in0], 8;;
146.dest_page:
147 tbit.z p0, p6=r30, 0;; // 0x1 dest page
148(p6) and r17=r30, r16
149(p6) br.cond.sptk.few .loop;;
150
151 tbit.z p0, p6=r30, 1;; // 0x2 indirect page
152(p6) and in0=r30, r16
153(p6) br.cond.sptk.few .loop;;
154
155 tbit.z p0, p6=r30, 2;; // 0x4 end flag
156(p6) br.cond.sptk.few .end_loop;;
157
158 tbit.z p6, p0=r30, 3;; // 0x8 source page
159(p6) br.cond.sptk.few .loop
160
161 and r18=r30, r16
162
163 // simple copy page, may optimize later
164 movl r14=PAGE_SIZE/8 - 1;;
165 mov ar.lc=r14;;
1661:
167 ld8 r14=[r18], 8;;
168 st8 [r17]=r14;;
169 fc.i r17
170 add r17=8, r17
171 br.ctop.sptk.few 1b
172 br.sptk.few .loop
173 ;;
174
175.end_loop:
176 sync.i // for fc.i
177 ;;
178 srlz.i
179 ;;
180 srlz.d
181 ;;
182 br.call.sptk.many b0=b6;;
183
184.align 32
185memory_stack:
186 .fill 8192, 1, 0
187memory_stack_end:
188register_stack:
189 .fill 8192, 1, 0
190register_stack_end:
191relocate_new_kernel_end:
192END(relocate_new_kernel)
193
194.global relocate_new_kernel_size
195relocate_new_kernel_size:
196 data8 relocate_new_kernel_end - relocate_new_kernel
197
198GLOBAL_ENTRY(ia64_dump_cpu_regs)
199 .prologue
200 alloc loc0=ar.pfs,1,2,0,0
201 .body
202 mov ar.rsc=0 // put RSE in enforced lazy mode
203 add loc1=4*8, in0 // save r4 and r5 first
204 ;;
205{
206 flushrs // flush dirty regs to backing store
207 srlz.i
208}
209 st8 [loc1]=r4, 8
210 ;;
211 st8 [loc1]=r5, 8
212 ;;
213 add loc1=32*8, in0
214 mov r4=ar.rnat
215 ;;
216 st8 [in0]=r0, 8 // r0
217 st8 [loc1]=r4, 8 // rnat
218 mov r5=pr
219 ;;
220 st8 [in0]=r1, 8 // r1
221 st8 [loc1]=r5, 8 // pr
222 mov r4=b0
223 ;;
224 st8 [in0]=r2, 8 // r2
225 st8 [loc1]=r4, 8 // b0
226 mov r5=b1
227 ;;
228 st8 [in0]=r3, 24 // r3
229 st8 [loc1]=r5, 8 // b1
230 mov r4=b2
231 ;;
232 st8 [in0]=r6, 8 // r6
233 st8 [loc1]=r4, 8 // b2
234 mov r5=b3
235 ;;
236 st8 [in0]=r7, 8 // r7
237 st8 [loc1]=r5, 8 // b3
238 mov r4=b4
239 ;;
240 st8 [in0]=r8, 8 // r8
241 st8 [loc1]=r4, 8 // b4
242 mov r5=b5
243 ;;
244 st8 [in0]=r9, 8 // r9
245 st8 [loc1]=r5, 8 // b5
246 mov r4=b6
247 ;;
248 st8 [in0]=r10, 8 // r10
249 st8 [loc1]=r4, 8 // b6
250 mov r5=b7
251 ;;
252 st8 [in0]=r11, 8 // r11
253 st8 [loc1]=r5, 8 // b7
254 mov r4=b0
255 ;;
256 st8 [in0]=r12, 8 // r12
257 st8 [loc1]=r4, 8 // ip
258 mov r5=loc0
259 ;;
260 st8 [in0]=r13, 8 // r13
261 extr.u r5=r5, 0, 38 // ar.pfs.pfm
262 mov r4=r0 // user mask
263 ;;
264 st8 [in0]=r14, 8 // r14
265 st8 [loc1]=r5, 8 // cfm
266 ;;
267 st8 [in0]=r15, 8 // r15
268 st8 [loc1]=r4, 8 // user mask
269 mov r5=ar.rsc
270 ;;
271 st8 [in0]=r16, 8 // r16
272 st8 [loc1]=r5, 8 // ar.rsc
273 mov r4=ar.bsp
274 ;;
275 st8 [in0]=r17, 8 // r17
276 st8 [loc1]=r4, 8 // ar.bsp
277 mov r5=ar.bspstore
278 ;;
279 st8 [in0]=r18, 8 // r18
280 st8 [loc1]=r5, 8 // ar.bspstore
281 mov r4=ar.rnat
282 ;;
283 st8 [in0]=r19, 8 // r19
284 st8 [loc1]=r4, 8 // ar.rnat
285 mov r5=ar.ccv
286 ;;
287 st8 [in0]=r20, 8 // r20
288 st8 [loc1]=r5, 8 // ar.ccv
289 mov r4=ar.unat
290 ;;
291 st8 [in0]=r21, 8 // r21
292 st8 [loc1]=r4, 8 // ar.unat
293 mov r5 = ar.fpsr
294 ;;
295 st8 [in0]=r22, 8 // r22
296 st8 [loc1]=r5, 8 // ar.fpsr
297 mov r4 = ar.unat
298 ;;
299 st8 [in0]=r23, 8 // r23
300 st8 [loc1]=r4, 8 // unat
301 mov r5 = ar.fpsr
302 ;;
303 st8 [in0]=r24, 8 // r24
304 st8 [loc1]=r5, 8 // fpsr
305 mov r4 = ar.pfs
306 ;;
307 st8 [in0]=r25, 8 // r25
308 st8 [loc1]=r4, 8 // ar.pfs
309 mov r5 = ar.lc
310 ;;
311 st8 [in0]=r26, 8 // r26
312 st8 [loc1]=r5, 8 // ar.lc
313 mov r4 = ar.ec
314 ;;
315 st8 [in0]=r27, 8 // r27
316 st8 [loc1]=r4, 8 // ar.ec
317 mov r5 = ar.csd
318 ;;
319 st8 [in0]=r28, 8 // r28
320 st8 [loc1]=r5, 8 // ar.csd
321 mov r4 = ar.ssd
322 ;;
323 st8 [in0]=r29, 8 // r29
324 st8 [loc1]=r4, 8 // ar.ssd
325 ;;
326 st8 [in0]=r30, 8 // r30
327 ;;
328 st8 [in0]=r31, 8 // r31
329 mov ar.pfs=loc0
330 ;;
331 br.ret.sptk.many rp
332END(ia64_dump_cpu_regs)
333
334
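
Annotation: the copy loop in relocate_new_kernel above walks a kexec page list. Bit 0 of an entry names the next destination page, bit 1 an indirection page, bit 2 ends the list, and bit 3 marks a source page to copy, matching the IND_DESTINATION/IND_INDIRECTION/IND_DONE/IND_SOURCE flags from include/linux/kexec.h. The same walk rendered as standalone C, a sketch only (16KB pages assumed, a common ia64 configuration):

	#include <string.h>

	#define IND_DESTINATION	0x1	/* set the copy target */
	#define IND_INDIRECTION	0x2	/* continue at another list page */
	#define IND_DONE	0x4	/* end of the list */
	#define IND_SOURCE	0x8	/* copy this page to the target */

	#define PAGE_SIZE	(1UL << 14)	/* assumed 16KB pages */
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	static void walk_page_list(unsigned long *entry)
	{
		unsigned long dest = 0;

		for (;; entry++) {
			unsigned long e = *entry;

			if (e & IND_DESTINATION) {
				dest = e & PAGE_MASK;
			} else if (e & IND_INDIRECTION) {
				/* -1 so the loop increment lands on the
				 * first entry of the new list page */
				entry = (unsigned long *)(e & PAGE_MASK) - 1;
			} else if (e & IND_DONE) {
				break;
			} else if (e & IND_SOURCE) {
				memcpy((void *)dest,
				       (void *)(e & PAGE_MASK), PAGE_SIZE);
				dest += PAGE_SIZE;
			}
		}
	}

The assembly additionally runs fc.i over each destination line and finishes with sync.i/srlz.i, so the copied kernel is coherent in the instruction cache before b6 is called.
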
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 642fdc7b969d..20bad78b5073 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -223,12 +223,13 @@ static void __init sal_desc_ap_wakeup(void *p) { }
223 */ 223 */
224static int sal_cache_flush_drops_interrupts; 224static int sal_cache_flush_drops_interrupts;
225 225
226static void __init 226void __init
227check_sal_cache_flush (void) 227check_sal_cache_flush (void)
228{ 228{
229 unsigned long flags; 229 unsigned long flags;
230 int cpu; 230 int cpu;
231 u64 vector; 231 u64 vector, cache_type = 3;
232 struct ia64_sal_retval isrv;
232 233
233 cpu = get_cpu(); 234 cpu = get_cpu();
234 local_irq_save(flags); 235 local_irq_save(flags);
@@ -243,7 +244,10 @@ check_sal_cache_flush (void)
243 while (!ia64_get_irr(IA64_TIMER_VECTOR)) 244 while (!ia64_get_irr(IA64_TIMER_VECTOR))
244 cpu_relax(); 245 cpu_relax();
245 246
246 ia64_sal_cache_flush(3); 247 SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
248
249 if (isrv.status)
250 printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n", isrv.status);
247 251
248 if (ia64_get_irr(IA64_TIMER_VECTOR)) { 252 if (ia64_get_irr(IA64_TIMER_VECTOR)) {
249 vector = ia64_get_ivr(); 253 vector = ia64_get_ivr();
@@ -331,7 +335,6 @@ ia64_sal_init (struct ia64_sal_systab *systab)
331 p += SAL_DESC_SIZE(*p); 335 p += SAL_DESC_SIZE(*p);
332 } 336 }
333 337
334 check_sal_cache_flush();
335} 338}
336 339
337int 340int
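
Annotation: two things change in sal.c. check_sal_cache_flush() loses its static qualifier because it is now called later in boot, from setup_arch() (see the setup.c hunk below), and the flush goes through a raw SAL_CALL so the returned status can be checked rather than discarded. Roughly the shape involved, paraphrasing the era's include/asm-ia64/sal.h (a sketch, not a drop-in):

	/* a SAL call returns a status word plus up to three values */
	struct ia64_sal_retval {
		s64 status;	/* zero on success, negative on failure */
		u64 v0, v1, v2;
	};

	static s64 sal_cache_flush_checked(u64 cache_type)
	{
		struct ia64_sal_retval isrv;

		SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type,
			 0, 0, 0, 0, 0, 0);
		if (isrv.status)
			printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n",
			       isrv.status);
		return isrv.status;
	}
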
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca5344a..e375a2f0f2c3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -302,7 +302,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
302static ssize_t 302static ssize_t
303salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 303salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
304{ 304{
305 struct inode *inode = file->f_dentry->d_inode; 305 struct inode *inode = file->f_path.dentry->d_inode;
306 struct proc_dir_entry *entry = PDE(inode); 306 struct proc_dir_entry *entry = PDE(inode);
307 struct salinfo_data *data = entry->data; 307 struct salinfo_data *data = entry->data;
308 char cmd[32]; 308 char cmd[32];
@@ -464,7 +464,7 @@ retry:
464static ssize_t 464static ssize_t
465salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 465salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
466{ 466{
467 struct inode *inode = file->f_dentry->d_inode; 467 struct inode *inode = file->f_path.dentry->d_inode;
468 struct proc_dir_entry *entry = PDE(inode); 468 struct proc_dir_entry *entry = PDE(inode);
469 struct salinfo_data *data = entry->data; 469 struct salinfo_data *data = entry->data;
470 u8 *buf; 470 u8 *buf;
@@ -525,7 +525,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
525static ssize_t 525static ssize_t
526salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) 526salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
527{ 527{
528 struct inode *inode = file->f_dentry->d_inode; 528 struct inode *inode = file->f_path.dentry->d_inode;
529 struct proc_dir_entry *entry = PDE(inode); 529 struct proc_dir_entry *entry = PDE(inode);
530 struct salinfo_data *data = entry->data; 530 struct salinfo_data *data = entry->data;
531 char cmd[32]; 531 char cmd[32];
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
575 .write = salinfo_log_write, 575 .write = salinfo_log_write,
576}; 576};
577 577
578#ifdef CONFIG_HOTPLUG_CPU
579static int __devinit 578static int __devinit
580salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 579salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
581{ 580{
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
620 .notifier_call = salinfo_cpu_callback, 619 .notifier_call = salinfo_cpu_callback,
621 .priority = 0, 620 .priority = 0,
622}; 621};
623#endif /* CONFIG_HOTPLUG_CPU */
624 622
625static int __init 623static int __init
626salinfo_init(void) 624salinfo_init(void)
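
Annotation: the salinfo changes are mechanical fallout from two tree-wide cleanups. The CPU-hotplug notifier is now registered unconditionally (hence the dropped #ifdef CONFIG_HOTPLUG_CPU pair), and struct file's dentry moved into an embedded struct path. The new access pattern, for reference:

	/* before: file->f_dentry->d_inode
	 * after:  the dentry/vfsmount pair lives in file->f_path */
	struct inode *inode = file->f_path.dentry->d_inode;
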
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c4caa8003492..14e1200376a9 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -43,6 +43,8 @@
43#include <linux/initrd.h> 43#include <linux/initrd.h>
44#include <linux/pm.h> 44#include <linux/pm.h>
45#include <linux/cpufreq.h> 45#include <linux/cpufreq.h>
46#include <linux/kexec.h>
47#include <linux/crash_dump.h>
46 48
47#include <asm/ia32.h> 49#include <asm/ia32.h>
48#include <asm/machvec.h> 50#include <asm/machvec.h>
@@ -252,6 +254,41 @@ reserve_memory (void)
252 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 254 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
253 n++; 255 n++;
254 256
257#ifdef CONFIG_KEXEC
258 /* crashkernel=size@offset specifies the size to reserve for a crash
259 * kernel. (The offset is ignored, kept only for compatibility with
260 * other archs.) By reserving this memory we guarantee that Linux
261 * never sets it up as a DMA target. Useful for holding code to do
262 * something appropriate after a kernel panic.
263 */
264 {
265 char *from = strstr(saved_command_line, "crashkernel=");
266 unsigned long base, size;
267 if (from) {
268 size = memparse(from + 12, &from);
269 if (size) {
270 sort_regions(rsvd_region, n);
271 base = kdump_find_rsvd_region(size,
272 rsvd_region, n);
273 if (base != ~0UL) {
274 rsvd_region[n].start =
275 (unsigned long)__va(base);
276 rsvd_region[n].end =
277 (unsigned long)__va(base + size);
278 n++;
279 crashk_res.start = base;
280 crashk_res.end = base + size - 1;
281 }
282 }
283 }
284 efi_memmap_res.start = ia64_boot_param->efi_memmap;
285 efi_memmap_res.end = efi_memmap_res.start +
286 ia64_boot_param->efi_memmap_size;
287 boot_param_res.start = __pa(ia64_boot_param);
288 boot_param_res.end = boot_param_res.start +
289 sizeof(*ia64_boot_param);
290 }
291#endif
255 /* end of memory marker */ 292 /* end of memory marker */
256 rsvd_region[n].start = ~0UL; 293 rsvd_region[n].start = ~0UL;
257 rsvd_region[n].end = ~0UL; 294 rsvd_region[n].end = ~0UL;
@@ -263,6 +300,7 @@ reserve_memory (void)
263 sort_regions(rsvd_region, num_rsvd_regions); 300 sort_regions(rsvd_region, num_rsvd_regions);
264} 301}
265 302
303
266/** 304/**
267 * find_initrd - get initrd parameters from the boot parameter structure 305 * find_initrd - get initrd parameters from the boot parameter structure
268 * 306 *
@@ -457,6 +495,8 @@ setup_arch (char **cmdline_p)
457 cpu_init(); /* initialize the bootstrap CPU */ 495 cpu_init(); /* initialize the bootstrap CPU */
458 mmu_context_init(); /* initialize context_id bitmap */ 496 mmu_context_init(); /* initialize context_id bitmap */
459 497
498 check_sal_cache_flush();
499
460#ifdef CONFIG_ACPI 500#ifdef CONFIG_ACPI
461 acpi_boot_init(); 501 acpi_boot_init();
462#endif 502#endif
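
Annotation: the reserve_memory() hunk above parses crashkernel=size[@offset] from the command line and reserves that much memory for the kdump kernel, recording it in crashk_res. A standalone approximation of the parsing step; the kernel uses memparse(), which understands K/M/G suffixes, so a strtoul-plus-shift helper stands in for it here, and the @offset part is accepted but ignored, as the comment says:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* poor man's memparse(): number with optional K/M/G suffix */
	static unsigned long parse_size(const char *s, char **end)
	{
		unsigned long v = strtoul(s, end, 0);

		switch (**end) {
		case 'G': case 'g': v <<= 10;	/* fall through */
		case 'M': case 'm': v <<= 10;	/* fall through */
		case 'K': case 'k': v <<= 10; (*end)++;
		}
		return v;
	}

	int main(void)
	{
		const char *cmdline =
			"root=/dev/sda2 crashkernel=128M@16M quiet";
		char *from = strstr(cmdline, "crashkernel=");

		if (from) {
			char *end;
			unsigned long size = parse_size(from + 12, &end);

			if (size)  /* kernel: pick a region, set crashk_res */
				printf("would reserve %lu bytes for kdump\n",
				       size);
		}
		return 0;
	}
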
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 657ac99a451c..b1b9aa4364b9 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/efi.h> 31#include <linux/efi.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <linux/kexec.h>
33 34
34#include <asm/atomic.h> 35#include <asm/atomic.h>
35#include <asm/current.h> 36#include <asm/current.h>
@@ -66,6 +67,7 @@ static volatile struct call_data_struct *call_data;
66 67
67#define IPI_CALL_FUNC 0 68#define IPI_CALL_FUNC 0
68#define IPI_CPU_STOP 1 69#define IPI_CPU_STOP 1
70#define IPI_KDUMP_CPU_STOP 3
69 71
70/* This needs to be cacheline aligned because it is written to by *other* CPUs. */ 72/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
71static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned; 73static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -108,7 +110,7 @@ cpu_die(void)
108} 110}
109 111
110irqreturn_t 112irqreturn_t
111handle_IPI (int irq, void *dev_id, struct pt_regs *regs) 113handle_IPI (int irq, void *dev_id)
112{ 114{
113 int this_cpu = get_cpu(); 115 int this_cpu = get_cpu();
114 unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation); 116 unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -155,7 +157,11 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
155 case IPI_CPU_STOP: 157 case IPI_CPU_STOP:
156 stop_this_cpu(); 158 stop_this_cpu();
157 break; 159 break;
158 160#ifdef CONFIG_CRASH_DUMP
161 case IPI_KDUMP_CPU_STOP:
162 unw_init_running(kdump_cpu_freeze, NULL);
163 break;
164#endif
159 default: 165 default:
160 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); 166 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
161 break; 167 break;
@@ -213,6 +219,26 @@ send_IPI_self (int op)
213 send_IPI_single(smp_processor_id(), op); 219 send_IPI_single(smp_processor_id(), op);
214} 220}
215 221
222#ifdef CONFIG_CRASH_DUMP
223void
224kdump_smp_send_stop(void)
225{
226 send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
227}
228
229void
230kdump_smp_send_init(void)
231{
232 unsigned int cpu, self_cpu;
233 self_cpu = smp_processor_id();
234 for_each_online_cpu(cpu) {
235 if (cpu != self_cpu) {
236 if (kdump_status[cpu] == 0)
237 platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
238 }
239 }
240}
241#endif
216/* 242/*
217 * Called with preemption disabled. 243 * Called with preemption disabled.
218 */ 244 */
@@ -328,10 +354,14 @@ int
328smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) 354smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
329{ 355{
330 struct call_data_struct data; 356 struct call_data_struct data;
331 int cpus = num_online_cpus()-1; 357 int cpus;
332 358
333 if (!cpus) 359 spin_lock(&call_lock);
360 cpus = num_online_cpus() - 1;
361 if (!cpus) {
362 spin_unlock(&call_lock);
334 return 0; 363 return 0;
364 }
335 365
336 /* Can deadlock when called with interrupts disabled */ 366 /* Can deadlock when called with interrupts disabled */
337 WARN_ON(irqs_disabled()); 367 WARN_ON(irqs_disabled());
@@ -343,8 +373,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
343 if (wait) 373 if (wait)
344 atomic_set(&data.finished, 0); 374 atomic_set(&data.finished, 0);
345 375
346 spin_lock(&call_lock);
347
348 call_data = &data; 376 call_data = &data;
349 mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ 377 mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
350 send_IPI_allbutself(IPI_CALL_FUNC); 378 send_IPI_allbutself(IPI_CALL_FUNC);
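
Annotation: besides the kdump IPI plumbing, the smp_call_function() hunk fixes a race: the online-CPU count used to be taken before call_lock was acquired, so a CPU could appear or disappear between counting the responders and sending the IPI. The patched lock scope, condensed into a sketch (wait path and sanity checks trimmed; all symbols are the kernel's):

	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;	/* counted under the lock */
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);

	call_data = &data;		/* published under the same lock */
	mb();
	send_IPI_allbutself(IPI_CALL_FUNC);
	while (atomic_read(&data.started) != cpus)
		cpu_relax();
	spin_unlock(&call_lock);

Because the count, the publication of call_data and the broadcast all happen inside one critical section, the set of CPUs expected to answer can no longer go stale.
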
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
463} 463}
464 464
465struct create_idle { 465struct create_idle {
466 struct work_struct work;
466 struct task_struct *idle; 467 struct task_struct *idle;
467 struct completion done; 468 struct completion done;
468 int cpu; 469 int cpu;
469}; 470};
470 471
471void 472void
472do_fork_idle(void *_c_idle) 473do_fork_idle(struct work_struct *work)
473{ 474{
474 struct create_idle *c_idle = _c_idle; 475 struct create_idle *c_idle =
476 container_of(work, struct create_idle, work);
475 477
476 c_idle->idle = fork_idle(c_idle->cpu); 478 c_idle->idle = fork_idle(c_idle->cpu);
477 complete(&c_idle->done); 479 complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
482{ 484{
483 int timeout; 485 int timeout;
484 struct create_idle c_idle = { 486 struct create_idle c_idle = {
487 .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
485 .cpu = cpu, 488 .cpu = cpu,
486 .done = COMPLETION_INITIALIZER(c_idle.done), 489 .done = COMPLETION_INITIALIZER(c_idle.done),
487 }; 490 };
488 DECLARE_WORK(work, do_fork_idle, &c_idle);
489 491
490 c_idle.idle = get_idle_for_cpu(cpu); 492 c_idle.idle = get_idle_for_cpu(cpu);
491 if (c_idle.idle) { 493 if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
497 * We can't use kernel_thread since we must avoid to reschedule the child. 499 * We can't use kernel_thread since we must avoid to reschedule the child.
498 */ 500 */
499 if (!keventd_up() || current_is_keventd()) 501 if (!keventd_up() || current_is_keventd())
500 work.func(work.data); 502 c_idle.work.func(&c_idle.work);
501 else { 503 else {
502 schedule_work(&work); 504 schedule_work(&c_idle.work);
503 wait_for_completion(&c_idle.done); 505 wait_for_completion(&c_idle.done);
504 } 506 }
505 507
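
Annotation: the smpboot hunk is the conversion to the reworked workqueue API, in which a handler receives the struct work_struct itself and recovers its payload with container_of() instead of taking a void * argument; that is why struct create_idle now embeds its work item. The general idiom, with hypothetical frob_* names standing in:

	struct frob_request {
		struct work_struct work;   /* embedded, not pointed-to */
		int arg;
		struct completion done;
	};

	static void frob_worker(struct work_struct *work)
	{
		struct frob_request *req =
			container_of(work, struct frob_request, work);

		do_frob(req->arg);         /* hypothetical payload handler */
		complete(&req->done);
	}

	static void frob_sync(int arg)
	{
		struct frob_request req = {
			.work = __WORK_INITIALIZER(req.work, frob_worker),
			.done = COMPLETION_INITIALIZER(req.done),
			.arg  = arg,
		};

		schedule_work(&req.work);
		wait_for_completion(&req.done);  /* stack object: must wait */
	}
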
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 41169a9bc301..39e0cd3a0884 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -84,6 +84,12 @@ timer_interrupt (int irq, void *dev_id)
84 84
85 if (time_after(new_itm, ia64_get_itc())) 85 if (time_after(new_itm, ia64_get_itc()))
86 break; 86 break;
87
88 /*
89 * Allow IPIs to interrupt the timer loop.
90 */
91 local_irq_enable();
92 local_irq_disable();
87 } 93 }
88 94
89 do { 95 do {
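
Annotation: the timer hunk opens a brief interrupt window on every lap of the tick catch-up loop, so that pending IPIs (notably the kdump freeze IPI added in smp.c above) can be delivered even while the handler accounts for many missed ticks. The idiom in miniature, with hypothetical helper names:

	while (more_ticks_to_account()) {  /* long loop, IRQs normally off */
		account_one_tick();
		local_irq_enable();   /* pending interrupts fire here */
		local_irq_disable();  /* back to the protected section */
	}
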
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e89c6..687500ddb4b8 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
31{ 31{
32#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) 32#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
33 /* 33 /*
34 * If CPEI cannot be re-targetted and this is 34 * If CPEI can be re-targetted, or if this is not
35 * the CPEI target, then don't create the control file 35 * the CPEI target, then it is hotpluggable
36 */ 36 */
37 if (!can_cpei_retarget() && is_cpu_cpei_target(num)) 37 if (can_cpei_retarget() || !is_cpu_cpei_target(num))
38 sysfs_cpus[num].cpu.no_control = 1; 38 sysfs_cpus[num].cpu.hotpluggable = 1;
39 map_cpu_to_node(num, node_cpuid[num].nid); 39 map_cpu_to_node(num, node_cpuid[num].nid);
40#endif 40#endif
41 41
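
Annotation: the topology hunk flips the test from marking a CPU uncontrollable to marking it hotpluggable, following the rename of the sysfs flag; the two conditions are De Morgan complements of each other:

	/* old: no control file if the CPEI target cannot be moved away */
	no_control   = !can_cpei_retarget() &&  is_cpu_cpei_target(num);
	/* new: hotpluggable whenever that is not the case */
	hotpluggable =  can_cpei_retarget() || !is_cpu_cpei_target(num);
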
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index b3b2e389d6b2..d6083a0936f4 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -128,13 +128,7 @@ SECTIONS
128 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) 128 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
129 { 129 {
130 __initcall_start = .; 130 __initcall_start = .;
131 *(.initcall1.init) 131 INITCALLS
132 *(.initcall2.init)
133 *(.initcall3.init)
134 *(.initcall4.init)
135 *(.initcall5.init)
136 *(.initcall6.init)
137 *(.initcall7.init)
138 __initcall_end = .; 132 __initcall_end = .;
139 } 133 }
140 134
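
Annotation: the seven hand-listed .initcallN.init input sections give way to the INITCALLS macro from include/asm-generic/vmlinux.lds.h, which had just been introduced so that the list of initcall levels lives in one place for every architecture. On the C side, each level is simply a section name chosen by __define_initcall(), roughly as in include/linux/init.h of this era:

	#define __define_initcall(level,fn) \
		static initcall_t __initcall_##fn __attribute_used__ \
		__attribute__((__section__(".initcall" level ".init"))) = fn

	#define core_initcall(fn)	__define_initcall("1",fn)
	#define postcore_initcall(fn)	__define_initcall("2",fn)
	#define arch_initcall(fn)	__define_initcall("3",fn)
	#define subsys_initcall(fn)	__define_initcall("4",fn)
	#define fs_initcall(fn)		__define_initcall("5",fn)
	#define device_initcall(fn)	__define_initcall("6",fn)
	#define late_initcall(fn)	__define_initcall("7",fn)

Boot then walks the function pointers laid down between __initcall_start and __initcall_end in link order, which is what gives initcalls their level ordering.
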