Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile                 |   1
-rw-r--r--  arch/ia64/kernel/acpi.c                   |  29
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c   |  44
-rw-r--r--  arch/ia64/kernel/crash_dump.c             |   3
-rw-r--r--  arch/ia64/kernel/cyclone.c                |   8
-rw-r--r--  arch/ia64/kernel/efi.c                    |   1
-rw-r--r--  arch/ia64/kernel/entry.S                  |   6
-rw-r--r--  arch/ia64/kernel/iosapic.c                | 165
-rw-r--r--  arch/ia64/kernel/irq.c                    |  73
-rw-r--r--  arch/ia64/kernel/irq_ia64.c               |  15
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c             |  23
-rw-r--r--  arch/ia64/kernel/mca.c                    |  47
-rw-r--r--  arch/ia64/kernel/msi_ia64.c               |  53
-rw-r--r--  arch/ia64/kernel/palinfo.c                |   2
-rw-r--r--  arch/ia64/kernel/perfmon.c                |  32
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c   |   2
-rw-r--r--  arch/ia64/kernel/process.c                |   6
-rw-r--r--  arch/ia64/kernel/ptrace.c                 |   3
-rw-r--r--  arch/ia64/kernel/salinfo.c                |   4
-rw-r--r--  arch/ia64/kernel/setup.c                  |  22
-rw-r--r--  arch/ia64/kernel/smp.c                    |  13
-rw-r--r--  arch/ia64/kernel/smpboot.c                |  16
-rw-r--r--  arch/ia64/kernel/stacktrace.c             |  39
-rw-r--r--  arch/ia64/kernel/time.c                   |  32
-rw-r--r--  arch/ia64/kernel/topology.c               |   2
-rw-r--r--  arch/ia64/kernel/unwind.c                 |  23
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S            |   3
27 files changed, 306 insertions(+), 361 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index db10b1e378b0..395c2f216dd8 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_AUDIT) += audit.o
 obj-$(CONFIG_PCI_MSI)		+= msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 
 obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
 				   paravirt_patch.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c6c90f39f4d9..3be485a300b1 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -477,6 +477,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
 		return;
 
+	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
+		printk_once(KERN_WARNING
+			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
+			    ARRAY_SIZE(node_cpuid));
+		return;
+	}
 	pxm = get_processor_proximity_domain(pa);
 
 	/* record this node in proximity bitmap */
@@ -797,7 +803,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  * ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static
+static __cpuinit
 int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
@@ -872,7 +878,7 @@ __init void prefill_possible_map(void)
 	set_cpu_possible(i, true);
 }
 
-int acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
@@ -923,6 +929,11 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	return (0);
 }
 
+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+{
+	return _acpi_map_lsapic(handle, pcpu);
+}
 EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
@@ -1028,18 +1039,8 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
 /*
- * acpi_save_state_mem() - save kernel state
+ * acpi_suspend_lowlevel() - save kernel state and suspend.
  *
  * TBD when when IA64 starts to support suspend...
  */
-int acpi_save_state_mem(void) { return 0; }
-
-/*
- * acpi_restore_state()
- */
-void acpi_restore_state_mem(void) {}
-
-/*
- * do_suspend_lowlevel()
- */
-void do_suspend_lowlevel(void) {}
+int acpi_suspend_lowlevel(void) { return 0; }
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 22f61526a8e1..f09b174244d5 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -23,8 +23,6 @@
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
-
 MODULE_AUTHOR("Venkatesh Pallipadi");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
@@ -47,12 +45,12 @@ processor_set_pstate (
 {
 	s64 retval;
 
-	dprintk("processor_set_pstate\n");
+	pr_debug("processor_set_pstate\n");
 
 	retval = ia64_pal_set_pstate((u64)value);
 
 	if (retval) {
-		dprintk("Failed to set freq to 0x%x, with error 0x%lx\n",
+		pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
 			value, retval);
 		return -ENODEV;
 	}
@@ -67,14 +65,14 @@ processor_get_pstate (
 	u64 pstate_index = 0;
 	s64 retval;
 
-	dprintk("processor_get_pstate\n");
+	pr_debug("processor_get_pstate\n");
 
 	retval = ia64_pal_get_pstate(&pstate_index,
 				     PAL_GET_PSTATE_TYPE_INSTANT);
 	*value = (u32) pstate_index;
 
 	if (retval)
-		dprintk("Failed to get current freq with "
+		pr_debug("Failed to get current freq with "
 			"error 0x%lx, idx 0x%x\n", retval, *value);
 
 	return (int)retval;
@@ -90,7 +88,7 @@ extract_clock (
 {
 	unsigned long i;
 
-	dprintk("extract_clock\n");
+	pr_debug("extract_clock\n");
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		if (value == data->acpi_data.states[i].status)
@@ -110,7 +108,7 @@ processor_get_freq (
 	cpumask_t saved_mask;
 	unsigned long clock_freq;
 
-	dprintk("processor_get_freq\n");
+	pr_debug("processor_get_freq\n");
 
 	saved_mask = current->cpus_allowed;
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -148,7 +146,7 @@ processor_set_freq (
 	cpumask_t saved_mask;
 	int retval;
 
-	dprintk("processor_set_freq\n");
+	pr_debug("processor_set_freq\n");
 
 	saved_mask = current->cpus_allowed;
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
@@ -159,16 +157,16 @@ processor_set_freq (
 
 	if (state == data->acpi_data.state) {
 		if (unlikely(data->resume)) {
-			dprintk("Called after resume, resetting to P%d\n", state);
+			pr_debug("Called after resume, resetting to P%d\n", state);
 			data->resume = 0;
 		} else {
-			dprintk("Already at target state (P%d)\n", state);
+			pr_debug("Already at target state (P%d)\n", state);
 			retval = 0;
 			goto migrate_end;
 		}
 	}
 
-	dprintk("Transitioning from P%d to P%d\n",
+	pr_debug("Transitioning from P%d to P%d\n",
 		data->acpi_data.state, state);
 
 	/* cpufreq frequency struct */
@@ -186,7 +184,7 @@ processor_set_freq (
 
 	value = (u32) data->acpi_data.states[state].control;
 
-	dprintk("Transitioning to state: 0x%08x\n", value);
+	pr_debug("Transitioning to state: 0x%08x\n", value);
 
 	ret = processor_set_pstate(value);
 	if (ret) {
@@ -219,7 +217,7 @@ acpi_cpufreq_get (
 {
 	struct cpufreq_acpi_io *data = acpi_io_data[cpu];
 
-	dprintk("acpi_cpufreq_get\n");
+	pr_debug("acpi_cpufreq_get\n");
 
 	return processor_get_freq(data, cpu);
 }
@@ -235,7 +233,7 @@ acpi_cpufreq_target (
 	unsigned int next_state = 0;
 	unsigned int result = 0;
 
-	dprintk("acpi_cpufreq_setpolicy\n");
+	pr_debug("acpi_cpufreq_setpolicy\n");
 
 	result = cpufreq_frequency_table_target(policy,
 			data->freq_table, target_freq, relation, &next_state);
@@ -255,7 +253,7 @@ acpi_cpufreq_verify (
 	unsigned int result = 0;
 	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
 
-	dprintk("acpi_cpufreq_verify\n");
+	pr_debug("acpi_cpufreq_verify\n");
 
 	result = cpufreq_frequency_table_verify(policy,
 			data->freq_table);
@@ -273,7 +271,7 @@ acpi_cpufreq_cpu_init (
 	struct cpufreq_acpi_io *data;
 	unsigned int result = 0;
 
-	dprintk("acpi_cpufreq_cpu_init\n");
+	pr_debug("acpi_cpufreq_cpu_init\n");
 
 	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
@@ -288,7 +286,7 @@ acpi_cpufreq_cpu_init (
 
 	/* capability check */
 	if (data->acpi_data.state_count <= 1) {
-		dprintk("No P-States\n");
+		pr_debug("No P-States\n");
 		result = -ENODEV;
 		goto err_unreg;
 	}
@@ -297,7 +295,7 @@ acpi_cpufreq_cpu_init (
 	    ACPI_ADR_SPACE_FIXED_HARDWARE) ||
 	    (data->acpi_data.status_register.space_id !=
 	    ACPI_ADR_SPACE_FIXED_HARDWARE)) {
-		dprintk("Unsupported address space [%d, %d]\n",
+		pr_debug("Unsupported address space [%d, %d]\n",
 			(u32) (data->acpi_data.control_register.space_id),
 			(u32) (data->acpi_data.status_register.space_id));
 		result = -ENODEV;
@@ -348,7 +346,7 @@ acpi_cpufreq_cpu_init (
348 "activated.\n", cpu); 346 "activated.\n", cpu);
349 347
350 for (i = 0; i < data->acpi_data.state_count; i++) 348 for (i = 0; i < data->acpi_data.state_count; i++)
351 dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", 349 pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
352 (i == data->acpi_data.state?'*':' '), i, 350 (i == data->acpi_data.state?'*':' '), i,
353 (u32) data->acpi_data.states[i].core_frequency, 351 (u32) data->acpi_data.states[i].core_frequency,
354 (u32) data->acpi_data.states[i].power, 352 (u32) data->acpi_data.states[i].power,
@@ -383,7 +381,7 @@ acpi_cpufreq_cpu_exit (
 {
 	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
 
-	dprintk("acpi_cpufreq_cpu_exit\n");
+	pr_debug("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
@@ -418,7 +416,7 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 static int __init
 acpi_cpufreq_init (void)
 {
-	dprintk("acpi_cpufreq_init\n");
+	pr_debug("acpi_cpufreq_init\n");
 
 	return cpufreq_register_driver(&acpi_cpufreq_driver);
 }
@@ -427,7 +425,7 @@ acpi_cpufreq_init (void)
 static void __exit
 acpi_cpufreq_exit (void)
 {
-	dprintk("acpi_cpufreq_exit\n");
+	pr_debug("acpi_cpufreq_exit\n");
 
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 	return;
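The acpi-cpufreq changes above replace the driver-private dprintk() macro with the kernel's generic pr_debug(). A minimal sketch of the resulting pattern, assuming an illustrative "acpi-cpufreq:" prefix and a made-up helper name (neither is taken from this patch):

/* pr_fmt() must be defined before any include that pulls in printk. */
#define pr_fmt(fmt) "acpi-cpufreq: " fmt	/* assumed prefix, for illustration */

#include <linux/kernel.h>

static int example_set_pstate(u32 value)	/* hypothetical helper */
{
	/* Compiled away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled. */
	pr_debug("setting P-state to 0x%x\n", value);
	return 0;
}

With CONFIG_DYNAMIC_DEBUG these call sites can also be switched on per file or per line at runtime through debugfs, which is why private per-driver debug macros like dprintk were being retired across the tree.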
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 23e91290e41f..c8c9298666fb 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -13,9 +13,6 @@
 #include <asm/page.h>
 #include <asm/uaccess.h>
 
-/* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 71e35864d2e2..f64097b5118a 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -31,8 +31,6 @@ static struct clocksource clocksource_cyclone = {
 	.rating         = 300,
 	.read           = read_cyclone,
 	.mask           = (1LL << 40) - 1,
-	.mult           = 0, /*to be caluclated*/
-	.shift          = 16,
 	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -59,13 +57,13 @@ int __init init_cyclone_clock(void)
 		return -ENODEV;
 	}
 	base = readq(reg);
+	iounmap(reg);
 	if(!base){
 		printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
 				" value.\n");
 		use_cyclone = 0;
 		return -ENODEV;
 	}
-	iounmap(reg);
 
 	/* setup PMCC */
 	offset = (base + CYCLONE_PMCC_OFFSET);
@@ -118,9 +116,7 @@ int __init init_cyclone_clock(void)
 	/* initialize last tick */
 	cyclone_mc = cyclone_timer;
 	clocksource_cyclone.fsys_mmio = cyclone_timer;
-	clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
-						       clocksource_cyclone.shift);
-	clocksource_register(&clocksource_cyclone);
+	clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
 
 	return 0;
 }
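The cyclone.c hunks above are the standard clocksource API migration of this period: rather than computing .mult by hand for a fixed .shift with clocksource_hz2mult() and then calling clocksource_register(), the driver leaves both fields zero and lets clocksource_register_hz() derive a suitable mult/shift pair. A hedged sketch of the pattern, with a placeholder frequency and read callback (not from this patch):

#include <linux/clocksource.h>

#define EXAMPLE_TIMER_FREQ	100000000	/* placeholder: 100 MHz */

static cycle_t example_read(struct clocksource *cs)
{
	return 0;	/* placeholder: read the free-running counter here */
}

static struct clocksource cs_example = {
	.name	= "example",
	.rating	= 300,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(40),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init cs_example_init(void)
{
	/* Old: cs.mult = clocksource_hz2mult(freq, cs.shift);
	 *      clocksource_register(&cs);
	 * New: the core computes mult/shift from the frequency itself. */
	return clocksource_register_hz(&cs_example, EXAMPLE_TIMER_FREQ);
}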
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a0f001928502..6fc03aff046c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -23,6 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <linux/crash_dump.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 244704a174de..97dd2abdeb1a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,6 +1771,12 @@ sys_call_table:
 	data8 sys_fanotify_init
 	data8 sys_fanotify_mark
 	data8 sys_prlimit64			// 1325
+	data8 sys_name_to_handle_at
+	data8 sys_open_by_handle_at
+	data8 sys_clock_adjtime
+	data8 sys_syncfs
+	data8 sys_setns				// 1330
+	data8 sys_sendmmsg
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7ded76658d2d..b0f9afebb146 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -108,10 +108,6 @@
 #define DBG(fmt...)
 #endif
 
-#define NR_PREALLOCATE_RTE_ENTRIES \
-	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
-#define RTE_PREALLOCATED	(1)
-
 static DEFINE_SPINLOCK(iosapic_lock);
 
 /*
@@ -136,7 +132,6 @@ struct iosapic_rte_info {
 	struct list_head rte_list;	/* RTEs sharing the same vector */
 	char		rte_index;	/* IOSAPIC RTE index */
 	int		refcnt;		/* reference counter */
-	unsigned int	flags;		/* flags */
 	struct iosapic	*iosapic;
 } ____cacheline_aligned;
 
@@ -155,9 +150,6 @@ static struct iosapic_intr_info {
 
 static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
 
-static int iosapic_kmalloc_ok;
-static LIST_HEAD(free_rte_list);
-
 static inline void
 iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
 {
@@ -265,7 +257,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
 }
 
 static void
-nop (unsigned int irq)
+nop (struct irq_data *data)
 {
 	/* do nothing... */
 }
@@ -295,8 +287,9 @@ kexec_disable_iosapic(void)
 #endif
 
 static void
-mask_irq (unsigned int irq)
+mask_irq (struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	u32 low32;
 	int rte_index;
 	struct iosapic_rte_info *rte;
@@ -313,8 +306,9 @@ mask_irq (unsigned int irq)
 }
 
 static void
-unmask_irq (unsigned int irq)
+unmask_irq (struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	u32 low32;
 	int rte_index;
 	struct iosapic_rte_info *rte;
@@ -331,9 +325,11 @@ unmask_irq (unsigned int irq)
 
 
 static int
-iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
+iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+		     bool force)
 {
 #ifdef CONFIG_SMP
+	unsigned int irq = data->irq;
 	u32 high32, low32;
 	int cpu, dest, rte_index;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
@@ -387,31 +383,33 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
  */
 
 static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
+iosapic_startup_level_irq (struct irq_data *data)
 {
-	unmask_irq(irq);
+	unmask_irq(data);
 	return 0;
 }
 
 static void
-iosapic_end_level_irq (unsigned int irq)
+iosapic_unmask_level_irq (struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	ia64_vector vec = irq_to_vector(irq);
 	struct iosapic_rte_info *rte;
 	int do_unmask_irq = 0;
 
 	irq_complete_move(irq);
-	if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
 		do_unmask_irq = 1;
-		mask_irq(irq);
-	}
+		mask_irq(data);
+	} else
+		unmask_irq(data);
 
 	list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
 		iosapic_eoi(rte->iosapic->addr, vec);
 
 	if (unlikely(do_unmask_irq)) {
-		move_masked_irq(irq);
-		unmask_irq(irq);
+		irq_move_masked_irq(data);
+		unmask_irq(data);
 	}
 }
 
@@ -421,16 +419,15 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq		nop
 
 static struct irq_chip irq_type_iosapic_level = {
 	.name =			"IO-SAPIC-level",
-	.startup =		iosapic_startup_level_irq,
-	.shutdown =		iosapic_shutdown_level_irq,
-	.enable =		iosapic_enable_level_irq,
-	.disable =		iosapic_disable_level_irq,
-	.ack =			iosapic_ack_level_irq,
-	.end =			iosapic_end_level_irq,
-	.mask =			mask_irq,
-	.unmask =		unmask_irq,
-	.set_affinity =		iosapic_set_affinity
+	.irq_startup =		iosapic_startup_level_irq,
+	.irq_shutdown =		iosapic_shutdown_level_irq,
+	.irq_enable =		iosapic_enable_level_irq,
+	.irq_disable =		iosapic_disable_level_irq,
+	.irq_ack =		iosapic_ack_level_irq,
+	.irq_mask =		mask_irq,
+	.irq_unmask =		iosapic_unmask_level_irq,
+	.irq_set_affinity =	iosapic_set_affinity
 };
 
436/* 433/*
@@ -438,9 +435,9 @@ static struct irq_chip irq_type_iosapic_level = {
438 */ 435 */
439 436
440static unsigned int 437static unsigned int
441iosapic_startup_edge_irq (unsigned int irq) 438iosapic_startup_edge_irq (struct irq_data *data)
442{ 439{
443 unmask_irq(irq); 440 unmask_irq(data);
444 /* 441 /*
445 * IOSAPIC simply drops interrupts pended while the 442 * IOSAPIC simply drops interrupts pended while the
446 * corresponding pin was masked, so we can't know if an 443 * corresponding pin was masked, so we can't know if an
@@ -450,37 +447,25 @@ iosapic_startup_edge_irq (unsigned int irq)
 }
 
 static void
-iosapic_ack_edge_irq (unsigned int irq)
+iosapic_ack_edge_irq (struct irq_data *data)
 {
-	struct irq_desc *idesc = irq_desc + irq;
-
-	irq_complete_move(irq);
-	move_native_irq(irq);
-	/*
-	 * Once we have recorded IRQ_PENDING already, we can mask the
-	 * interrupt for real. This prevents IRQ storms from unhandled
-	 * devices.
-	 */
-	if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
-	    (IRQ_PENDING|IRQ_DISABLED))
-		mask_irq(irq);
+	irq_complete_move(data->irq);
+	irq_move_irq(data);
 }
 
 #define iosapic_enable_edge_irq		unmask_irq
 #define iosapic_disable_edge_irq	nop
-#define iosapic_end_edge_irq		nop
 
 static struct irq_chip irq_type_iosapic_edge = {
 	.name =			"IO-SAPIC-edge",
-	.startup =		iosapic_startup_edge_irq,
-	.shutdown =		iosapic_disable_edge_irq,
-	.enable =		iosapic_enable_edge_irq,
-	.disable =		iosapic_disable_edge_irq,
-	.ack =			iosapic_ack_edge_irq,
-	.end =			iosapic_end_edge_irq,
-	.mask =			mask_irq,
-	.unmask =		unmask_irq,
-	.set_affinity =		iosapic_set_affinity
+	.irq_startup =		iosapic_startup_edge_irq,
+	.irq_shutdown =		iosapic_disable_edge_irq,
+	.irq_enable =		iosapic_enable_edge_irq,
+	.irq_disable =		iosapic_disable_edge_irq,
+	.irq_ack =		iosapic_ack_edge_irq,
+	.irq_mask =		mask_irq,
+	.irq_unmask =		unmask_irq,
+	.irq_set_affinity =	iosapic_set_affinity
 };
 
 static unsigned int
@@ -552,37 +537,6 @@ iosapic_reassign_vector (int irq)
 	}
 }
 
-static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
-{
-	int i;
-	struct iosapic_rte_info *rte;
-	int preallocated = 0;
-
-	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
-		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
-				    NR_PREALLOCATE_RTE_ENTRIES);
-		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
-			list_add(&rte->rte_list, &free_rte_list);
-	}
-
-	if (!list_empty(&free_rte_list)) {
-		rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
-				 rte_list);
-		list_del(&rte->rte_list);
-		preallocated++;
-	} else {
-		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
-		if (!rte)
-			return NULL;
-	}
-
-	memset(rte, 0, sizeof(struct iosapic_rte_info));
-	if (preallocated)
-		rte->flags |= RTE_PREALLOCATED;
-
-	return rte;
-}
-
 static inline int irq_is_shared (int irq)
 {
 	return (iosapic_intr_info[irq].count > 1);
@@ -601,8 +555,7 @@ static int
 register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
 {
-	struct irq_desc *idesc;
-	struct irq_chip *irq_type;
+	struct irq_chip *chip, *irq_type;
 	int index;
 	struct iosapic_rte_info *rte;
 
@@ -615,7 +568,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 
 	rte = find_rte(irq, gsi);
 	if (!rte) {
-		rte = iosapic_alloc_rte();
+		rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
 		if (!rte) {
 			printk(KERN_WARNING "%s: cannot allocate memory\n",
 			       __func__);
@@ -649,15 +602,18 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 
 	irq_type = iosapic_get_irq_chip(trigger);
 
-	idesc = irq_desc + irq;
-	if (irq_type != NULL && idesc->chip != irq_type) {
-		if (idesc->chip != &no_irq_chip)
+	chip = irq_get_chip(irq);
+	if (irq_type != NULL && chip != irq_type) {
+		if (chip != &no_irq_chip)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
 			       __func__, irq_to_vector(irq),
-			       idesc->chip->name, irq_type->name);
-		idesc->chip = irq_type;
+			       chip->name, irq_type->name);
+		chip = irq_type;
 	}
+	__irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
+					   handle_edge_irq : handle_level_irq,
+					   NULL);
 	return 0;
 }
 
@@ -767,6 +723,7 @@ iosapic_register_intr (unsigned int gsi,
 	struct iosapic_rte_info *rte;
 	u32 low32;
 	unsigned char dmode;
+	struct irq_desc *desc;
 
 	/*
 	 * If this GSI has already been registered (i.e., it's a
@@ -794,12 +751,13 @@ iosapic_register_intr (unsigned int gsi,
 		goto unlock_iosapic_lock;
 	}
 
-	raw_spin_lock(&irq_desc[irq].lock);
+	desc = irq_to_desc(irq);
+	raw_spin_lock(&desc->lock);
 	dest = get_target_cpu(gsi, irq);
 	dmode = choose_dmode();
 	err = register_intr(gsi, irq, dmode, polarity, trigger);
 	if (err < 0) {
-		raw_spin_unlock(&irq_desc[irq].lock);
+		raw_spin_unlock(&desc->lock);
 		irq = err;
 		goto unlock_iosapic_lock;
 	}
@@ -818,7 +776,7 @@ iosapic_register_intr (unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
 
-	raw_spin_unlock(&irq_desc[irq].lock);
+	raw_spin_unlock(&desc->lock);
  unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return irq;
@@ -829,7 +787,6 @@ iosapic_unregister_intr (unsigned int gsi)
 {
 	unsigned long flags;
 	int irq, index;
-	struct irq_desc *idesc;
 	u32 low32;
 	unsigned long trigger, polarity;
 	unsigned int dest;
@@ -859,7 +816,6 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (--rte->refcnt > 0)
 		goto out;
 
-	idesc = irq_desc + irq;
 	rte->refcnt = NO_REF_RTE;
 
 	/* Mask the interrupt */
@@ -883,7 +839,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpumask_setall(idesc->affinity);
+		cpumask_setall(irq_get_irq_data(irq)->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
@@ -1161,10 +1117,3 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
 	return;
 }
 #endif
-
-static int __init iosapic_enable_kmalloc (void)
-{
-	iosapic_kmalloc_ok = 1;
-	return 0;
-}
-core_initcall (iosapic_enable_kmalloc);
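The iosapic conversion above (and the lsapic and MSI conversions further down) follow the generic irqchip migration of this kernel cycle: every irq_chip callback is renamed with an irq_ prefix and now receives a struct irq_data pointer instead of a bare interrupt number. A minimal sketch of the before/after shape, with placeholder chip and handler names:

#include <linux/irq.h>

/* Old style (removed): void my_mask(unsigned int irq); wired up as .mask = my_mask.
 * New style: the callback gets the irq_data, from which the number, chip data
 * and affinity mask can all be derived. */
static void example_mask(struct irq_data *data)
{
	unsigned int irq = data->irq;	/* still available when the hardware needs it */
	/* program the controller to mask 'irq' here */
}

static void example_unmask(struct irq_data *data)
{
	/* program the controller to unmask data->irq here */
}

static struct irq_chip example_chip = {	/* placeholder name */
	.name		= "EXAMPLE",
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
};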
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 94ee9d067cbd..ad69606613eb 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -53,47 +53,9 @@ atomic_t irq_err_count;
 /*
  * /proc/interrupts printing:
  */
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		char cpuname[16];
-		seq_printf(p, "           ");
-		for_each_online_cpu(j) {
-			snprintf(cpuname, 10, "CPU%d", j);
-			seq_printf(p, "%10s ", cpuname);
-		}
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
-		if (!action)
-			goto skip;
-		seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j) {
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-		}
-#endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
-		seq_printf(p, " %s", action->name);
-
-		for (action=action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-
-		seq_putc(p, '\n');
-skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS)
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	return 0;
 }
 
@@ -103,7 +65,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(irq_desc[irq].affinity,
+		cpumask_copy(irq_get_irq_data(irq)->affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -130,13 +92,14 @@ unsigned int vectors_in_migration[NR_IRQS];
  */
 static void migrate_irqs(void)
 {
-	struct irq_desc *desc;
 	int irq, new_cpu;
 
 	for (irq=0; irq < NR_IRQS; irq++) {
-		desc = irq_desc + irq;
+		struct irq_desc *desc = irq_to_desc(irq);
+		struct irq_data *data = irq_desc_get_irq_data(desc);
+		struct irq_chip *chip = irq_data_get_irq_chip(data);
 
-		if (desc->status == IRQ_DISABLED)
+		if (irqd_irq_disabled(data))
 			continue;
 
 		/*
@@ -145,10 +108,10 @@ static void migrate_irqs(void)
 		 * tell CPU not to respond to these local intr sources.
 		 * such as ITV,CPEI,MCA etc.
 		 */
-		if (desc->status == IRQ_PER_CPU)
+		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(data->affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
@@ -160,16 +123,16 @@ static void migrate_irqs(void)
 		/*
 		 * Al three are essential, currently WARN_ON.. maybe panic?
 		 */
-		if (desc->chip && desc->chip->disable &&
-		    desc->chip->enable && desc->chip->set_affinity) {
-			desc->chip->disable(irq);
-			desc->chip->set_affinity(irq,
-						 cpumask_of(new_cpu));
-			desc->chip->enable(irq);
+		if (chip && chip->irq_disable &&
+		    chip->irq_enable && chip->irq_set_affinity) {
+			chip->irq_disable(data);
+			chip->irq_set_affinity(data,
+					       cpumask_of(new_cpu), false);
+			chip->irq_enable(data);
 		} else {
-			WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
-				!(desc->chip->enable) ||
-				!(desc->chip->set_affinity)));
+			WARN_ON((!chip || !chip->irq_disable ||
+				!chip->irq_enable ||
+				!chip->irq_set_affinity));
 		}
 	}
 }
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index f14c35f9b03a..782c3a357f24 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -30,6 +30,8 @@
 #include <linux/bitops.h>
 #include <linux/irq.h>
 #include <linux/ratelimit.h>
+#include <linux/acpi.h>
+#include <linux/sched.h>
 
 #include <asm/delay.h>
 #include <asm/intrinsics.h>
@@ -342,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		if (irq < 0)
 			continue;
 
-		desc = irq_desc + irq;
+		desc = irq_to_desc(irq);
 		cfg = irq_cfg + irq;
 		raw_spin_lock(&desc->lock);
 		if (!cfg->move_cleanup_count)
@@ -495,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 		smp_local_flush_tlb();
 		kstat_incr_irqs_this_cpu(irq, desc);
 	} else if (unlikely(IS_RESCHEDULE(vector))) {
+		scheduler_ipi();
 		kstat_incr_irqs_this_cpu(irq, desc);
 	} else {
 		ia64_setreg(_IA64_REG_CR_TPR, vector);
@@ -625,16 +628,15 @@ static struct irqaction tlb_irqaction = {
 void
 ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
-	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq = vec;
 	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
-	desc = irq_desc + irq;
-	desc->status |= IRQ_PER_CPU;
-	desc->chip = &irq_type_ia64_lsapic;
+	irq_set_status_flags(irq, IRQ_PER_CPU);
+	irq_set_chip(irq, &irq_type_ia64_lsapic);
 	if (action)
 		setup_irq(irq, action);
+	irq_set_handler(irq, handle_percpu_irq);
 }
 
 void __init
@@ -650,6 +652,9 @@ ia64_native_register_ipi(void)
 void __init
 init_IRQ (void)
 {
+#ifdef CONFIG_ACPI
+	acpi_boot_init();
+#endif
 	ia64_register_ipi();
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index fc1549d4564d..1b3a776e5161 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -15,31 +15,30 @@
 #include <linux/irq.h>
 
 static unsigned int
-lsapic_noop_startup (unsigned int irq)
+lsapic_noop_startup (struct irq_data *data)
 {
 	return 0;
 }
 
 static void
-lsapic_noop (unsigned int irq)
+lsapic_noop (struct irq_data *data)
 {
 	/* nothing to do... */
 }
 
-static int lsapic_retrigger(unsigned int irq)
+static int lsapic_retrigger(struct irq_data *data)
 {
-	ia64_resend_irq(irq);
+	ia64_resend_irq(data->irq);
 
 	return 1;
 }
 
 struct irq_chip irq_type_ia64_lsapic = {
 	.name =			"LSAPIC",
-	.startup =		lsapic_noop_startup,
-	.shutdown =		lsapic_noop,
-	.enable =		lsapic_noop,
-	.disable =		lsapic_noop,
-	.ack =			lsapic_noop,
-	.end =			lsapic_noop,
-	.retrigger =		lsapic_retrigger,
+	.irq_startup =		lsapic_noop_startup,
+	.irq_shutdown =		lsapic_noop,
+	.irq_enable =		lsapic_noop,
+	.irq_disable =		lsapic_noop,
+	.irq_ack =		lsapic_noop,
+	.irq_retrigger =	lsapic_retrigger,
 };
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index a0220dc5ff42..84fb405eee87 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -582,6 +582,8 @@ out:
 	/* Get the CPE error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
 
+	local_irq_disable();
+
 	return IRQ_HANDLED;
 }
 
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data)
 		data = mca_bootmem();
 		first_time = 0;
 	} else
-		data = __get_free_pages(GFP_KERNEL, get_order(sz));
+		data = (void *)__get_free_pages(GFP_KERNEL,
+						get_order(sz));
 	if (!data)
 		panic("Could not allocate MCA memory for cpu %d\n",
 		      cpu);
@@ -2055,25 +2058,6 @@ ia64_mca_init(void)
 
 	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
 
-	/*
-	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
-	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
-	 */
-	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
-	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
-	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */
-
-	/* Setup the MCA rendezvous interrupt vector */
-	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
-
-	/* Setup the MCA wakeup interrupt vector */
-	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
-
-#ifdef CONFIG_ACPI
-	/* Setup the CPEI/P handler */
-	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
-#endif
-
 	/* Initialize the areas set aside by the OS to buffer the
 	 * platform/processor error states for MCA/INIT/CMC
 	 * handling.
@@ -2103,6 +2087,25 @@ ia64_mca_late_init(void)
 	if (!mca_init)
 		return 0;
 
+	/*
+	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
+	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+	 */
+	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
+	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
+	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */
+
+	/* Setup the MCA rendezvous interrupt vector */
+	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
+
+	/* Setup the MCA wakeup interrupt vector */
+	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
+
+#ifdef CONFIG_ACPI
+	/* Setup the CPEI/P handler */
+	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+#endif
+
 	register_hotcpu_notifier(&mca_cpu_notifier);
 
 	/* Setup the CMCI/P vector and handler */
@@ -2122,7 +2125,6 @@ ia64_mca_late_init(void)
 	cpe_poll_timer.function = ia64_mca_cpe_poll;
 
 	{
-		struct irq_desc *desc;
 		unsigned int irq;
 
 		if (cpe_vector >= 0) {
@@ -2130,8 +2132,7 @@ ia64_mca_late_init(void)
 			irq = local_vector_to_irq(cpe_vector);
 			if (irq > 0) {
 				cpe_poll_enabled = 0;
-				desc = irq_desc + irq;
-				desc->status |= IRQ_PER_CPU;
+				irq_set_status_flags(irq, IRQ_PER_CPU);
 				setup_irq(irq, &mca_cpe_irqaction);
 				ia64_cpe_irq = irq;
 				ia64_mca_register_cpev(cpe_vector);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 4a746ea838ff..009df5434a7a 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,12 +12,13 @@
 static struct irq_chip ia64_msi_chip;
 
 #ifdef CONFIG_SMP
-static int ia64_set_msi_irq_affinity(unsigned int irq,
-				     const cpumask_t *cpu_mask)
+static int ia64_set_msi_irq_affinity(struct irq_data *idata,
+				     const cpumask_t *cpu_mask, bool force)
 {
 	struct msi_msg msg;
 	u32 addr, data;
 	int cpu = first_cpu(*cpu_mask);
+	unsigned int irq = idata->irq;
 
 	if (!cpu_online(cpu))
 		return -1;
@@ -38,7 +39,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;
 
 	write_msi_msg(irq, &msg);
-	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+	cpumask_copy(idata->affinity, cpumask_of(cpu));
 
 	return 0;
 }
@@ -55,7 +56,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	if (irq < 0)
 		return irq;
 
-	set_irq_msi(irq, desc);
+	irq_set_msi_desc(irq, desc);
 	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
 	dest_phys_id = cpu_physical_id(first_cpu(mask));
 	vector = irq_to_vector(irq);
@@ -74,7 +75,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 		MSI_DATA_VECTOR(vector);
 
 	write_msi_msg(irq, &msg);
-	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
+	irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
 
 	return 0;
 }
@@ -84,16 +85,16 @@ void ia64_teardown_msi_irq(unsigned int irq)
 	destroy_irq(irq);
 }
 
-static void ia64_ack_msi_irq(unsigned int irq)
+static void ia64_ack_msi_irq(struct irq_data *data)
 {
-	irq_complete_move(irq);
-	move_native_irq(irq);
+	irq_complete_move(data->irq);
+	irq_move_irq(data);
 	ia64_eoi();
 }
 
-static int ia64_msi_retrigger_irq(unsigned int irq)
+static int ia64_msi_retrigger_irq(struct irq_data *data)
 {
-	unsigned int vector = irq_to_vector(irq);
+	unsigned int vector = irq_to_vector(data->irq);
 	ia64_resend_irq(vector);
 
 	return 1;
@@ -103,14 +104,14 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
  * Generic ops used on most IA64 platforms.
  */
 static struct irq_chip ia64_msi_chip = {
 	.name		= "PCI-MSI",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
-	.ack		= ia64_ack_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
+	.irq_ack	= ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity	= ia64_set_msi_irq_affinity,
+	.irq_set_affinity = ia64_set_msi_irq_affinity,
 #endif
-	.retrigger	= ia64_msi_retrigger_irq,
+	.irq_retrigger	= ia64_msi_retrigger_irq,
 };
 
 
@@ -132,8 +133,10 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(struct irq_data *data,
+				 const struct cpumask *mask, bool force)
 {
+	unsigned int irq = data->irq;
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
 	int cpu = cpumask_first(mask);
@@ -152,7 +155,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	cpumask_copy(irq_desc[irq].affinity, mask);
+	cpumask_copy(data->affinity, mask);
 
 	return 0;
 }
@@ -160,13 +163,13 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 static struct irq_chip dmar_msi_type = {
 	.name = "DMAR_MSI",
-	.unmask = dmar_msi_unmask,
-	.mask = dmar_msi_mask,
-	.ack = ia64_ack_msi_irq,
+	.irq_unmask = dmar_msi_unmask,
+	.irq_mask = dmar_msi_mask,
+	.irq_ack = ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
-	.set_affinity = dmar_msi_set_affinity,
+	.irq_set_affinity = dmar_msi_set_affinity,
 #endif
-	.retrigger = ia64_msi_retrigger_irq,
+	.irq_retrigger = ia64_msi_retrigger_irq,
 };
 
 static int
@@ -203,8 +206,8 @@ int arch_setup_dmar_msi(unsigned int irq)
 	if (ret < 0)
 		return ret;
 	dmar_msi_write(irq, &msg);
-	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+	irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
 				      "edge");
 	return 0;
 }
 #endif /* CONFIG_DMAR */
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index fdf6f9d013e5..77597e5ea60a 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -434,7 +434,7 @@ register_info(char *page)
 	unsigned long phys_stacked;
 	pal_hints_u_t hints;
 	unsigned long iregs, dregs;
-	char *info_type[]={
+	static const char * const info_type[] = {
 		"Implemented AR(s)",
 		"AR(s) with read side-effects",
 		"Implemented CR(s)",
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index cce050e85c73..89accc626b86 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -617,17 +617,19 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	return get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
+/* forward declaration */
+static const struct dentry_operations pfmfs_dentry_operations;
 
-static int
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
-	     struct vfsmount *mnt)
+static struct dentry *
+pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
 {
-	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
+	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
+			    PFMFS_MAGIC);
 }
 
 static struct file_system_type pfm_fs_type = {
 	.name     = "pfmfs",
-	.get_sb   = pfmfs_get_sb,
+	.mount    = pfmfs_mount,
 	.kill_sb  = kill_anon_super,
 };
 
@@ -830,10 +832,9 @@ pfm_rvmalloc(unsigned long size)
 	unsigned long addr;
 
 	size = PAGE_ALIGN(size);
-	mem = vmalloc(size);
+	mem = vzalloc(size);
 	if (mem) {
 		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		memset(mem, 0, size);
 		addr = (unsigned long)mem;
 		while (size > 0) {
 			pfm_reserve_page(addr);
@@ -1543,7 +1544,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
  * any operations on the root directory. However, we need a non-trivial
  * d_name - pfm: will go nicely and kill the special-casing in procfs.
  */
-static struct vfsmount *pfmfs_mnt;
+static struct vfsmount *pfmfs_mnt __read_mostly;
 
 static int __init
 init_pfm_fs(void)
@@ -1573,7 +1574,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		return -EINVAL;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EINVAL;
@@ -1673,7 +1674,7 @@ pfm_poll(struct file *filp, poll_table * wait)
 		return 0;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
 		return 0;
@@ -1733,7 +1734,7 @@ pfm_fasync(int fd, struct file *filp, int on)
 		return -EBADF;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
@@ -1841,7 +1842,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 		return -EBADF;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
@@ -1984,7 +1985,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		return -EBADF;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
@@ -2186,7 +2187,7 @@ static const struct file_operations pfm_file_ops = {
 };
 
 static int
-pfmfs_delete_dentry(struct dentry *dentry)
+pfmfs_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -2234,7 +2235,6 @@ pfm_alloc_file(pfm_context_t *ctx)
 	}
 	path.mnt = mntget(pfmfs_mnt);
 
-	path.dentry->d_op = &pfmfs_dentry_operations;
 	d_add(path.dentry, inode);
 
 	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
@@ -4907,7 +4907,7 @@ restart_args:
 		goto error_args;
 	}
 
-	ctx = (pfm_context_t *)file->private_data;
+	ctx = file->private_data;
 	if (unlikely(ctx == NULL)) {
 		DPRINT(("no context for fd %d\n", fd));
 		goto error_args;
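The pfmfs hunks above track the VFS switch from file_system_type.get_sb to .mount: the new hook returns the root dentry directly instead of filling in a struct vfsmount, and mount_pseudo() now also takes the dentry_operations that previously had to be poked into each dentry by hand. A sketch under those assumptions, with placeholder names and magic number:

#include <linux/fs.h>

#define EXAMPLEFS_MAGIC	0x6d794653	/* placeholder magic */

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	/* NULL super_operations and dentry_operations: defaults suffice here. */
	return mount_pseudo(fs_type, "example:", NULL, NULL, EXAMPLEFS_MAGIC);
}

static struct file_system_type example_fs_type = {
	.name		= "examplefs",	/* placeholder */
	.mount		= examplefs_mount,
	.kill_sb	= kill_anon_super,
};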
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 5f637bbfcccd..30c644ea44c9 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -150,7 +150,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
  *	current = task running at the time of the overflow.
  *
  *	per-task mode:
- *		- this is ususally the task being monitored.
+ *		- this is usually the task being monitored.
  *		  Under certain conditions, it might be a different task
  *
  *	system-wide:
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 16f1c7b04c69..6d33c5cc94f0 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -53,12 +53,8 @@
53 53
54void (*ia64_mark_idle)(int); 54void (*ia64_mark_idle)(int);
55 55
56unsigned long boot_option_idle_override = 0; 56unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
57EXPORT_SYMBOL(boot_option_idle_override); 57EXPORT_SYMBOL(boot_option_idle_override);
58unsigned long idle_halt;
59EXPORT_SYMBOL(idle_halt);
60unsigned long idle_nomwait;
61EXPORT_SYMBOL(idle_nomwait);
62void (*pm_idle) (void); 58void (*pm_idle) (void);
63EXPORT_SYMBOL(pm_idle); 59EXPORT_SYMBOL(pm_idle);
64void (*pm_power_off) (void); 60void (*pm_power_off) (void);
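[The process.c hunk folds the separate idle_halt/idle_nomwait flags into the single enum-valued boot_option_idle_override shared with x86. A sketch of the consolidation, assuming the enum definition that accompanied this change in <asm/processor.h>:

        enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT,
                                  IDLE_NOMWAIT, IDLE_POLL };   /* assumed */

        unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;

        /* callers now test one variable instead of three exported flags */
        static inline int idle_uses_halt(void)
        {
                return boot_option_idle_override == IDLE_HALT;
        }
]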
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 7c7909f9bc93..8848f43d819e 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1177,7 +1177,8 @@ ptrace_disable (struct task_struct *child)
1177} 1177}
1178 1178
1179long 1179long
1180arch_ptrace (struct task_struct *child, long request, long addr, long data) 1180arch_ptrace (struct task_struct *child, long request,
1181 unsigned long addr, unsigned long data)
1181{ 1182{
1182 switch (request) { 1183 switch (request) {
1183 case PTRACE_PEEKTEXT: 1184 case PTRACE_PEEKTEXT:
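[arch_ptrace() now takes addr and data as unsigned long to match the generic prototype; beyond consistency, unsigned arithmetic avoids sign-dependent surprises when an address has the top bit set, as ia64 region addresses do. A small user-space demonstration, assuming 64-bit long as on ia64:

        #include <stdio.h>

        int main(void)
        {
                unsigned long uaddr = 0xe000000000000000UL; /* high region addr */
                long saddr = (long)uaddr;                   /* old signed view */

                printf("signed   < 0x1000: %d\n", saddr < 0x1000); /* 1: bogus */
                printf("unsigned < 0x1000: %d\n", uaddr < 0x1000); /* 0: sane  */
                return 0;
        }
]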
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index aa8b5fa1a8de..79802e540e53 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -354,6 +354,7 @@ retry:
354static const struct file_operations salinfo_event_fops = { 354static const struct file_operations salinfo_event_fops = {
355 .open = salinfo_event_open, 355 .open = salinfo_event_open,
356 .read = salinfo_event_read, 356 .read = salinfo_event_read,
357 .llseek = noop_llseek,
357}; 358};
358 359
359static int 360static int
@@ -571,6 +572,7 @@ static const struct file_operations salinfo_data_fops = {
571 .release = salinfo_log_release, 572 .release = salinfo_log_release,
572 .read = salinfo_log_read, 573 .read = salinfo_log_read,
573 .write = salinfo_log_write, 574 .write = salinfo_log_write,
575 .llseek = default_llseek,
574}; 576};
575 577
576static int __cpuinit 578static int __cpuinit
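[The two new .llseek lines make each file's seek behaviour explicit now that fops are expected to spell it out: noop_llseek reports success without moving the file position (fitting the event file, where position is meaningless), while default_llseek keeps classic seekable semantics. The shape of the pattern, with a hypothetical read handler:

        #include <linux/fs.h>

        static ssize_t example_read(struct file *file, char __user *buf,
                                    size_t count, loff_t *ppos)
        {
                return 0;       /* stub */
        }

        static const struct file_operations example_fops = {
                .read   = example_read,
                .llseek = noop_llseek,  /* lseek() "succeeds", offset unchanged */
        };
]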
@@ -642,7 +644,7 @@ salinfo_init(void)
642 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 644 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
643 data = salinfo_data + i; 645 data = salinfo_data + i;
644 data->type = i; 646 data->type = i;
645 init_MUTEX(&data->mutex); 647 sema_init(&data->mutex, 1);
646 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); 648 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
647 if (!dir) 649 if (!dir)
648 continue; 650 continue;
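[init_MUTEX() was on its way out when this was written; it was never more than a semaphore initialized to a count of one, which sema_init() states directly. Equivalent usage, as a sketch:

        #include <linux/semaphore.h>

        static struct semaphore example_sem;

        static void example_setup(void)
        {
                sema_init(&example_sem, 1);     /* what init_MUTEX() used to do */

                down(&example_sem);             /* acquire */
                up(&example_sem);               /* release */
        }
]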
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 8fb958abf8d0..5e2c72498c51 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -479,25 +479,7 @@ static __init int setup_nomca(char *s)
479} 479}
480early_param("nomca", setup_nomca); 480early_param("nomca", setup_nomca);
481 481
482/*
483 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
484 * is_kdump_kernel() to determine if we are booting after a panic. Hence
485 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
486 */
487#ifdef CONFIG_CRASH_DUMP 482#ifdef CONFIG_CRASH_DUMP
488/* elfcorehdr= specifies the location of elf core header
489 * stored by the crashed kernel.
490 */
491static int __init parse_elfcorehdr(char *arg)
492{
493 if (!arg)
494 return -EINVAL;
495
496 elfcorehdr_addr = memparse(arg, &arg);
497 return 0;
498}
499early_param("elfcorehdr", parse_elfcorehdr);
500
501int __init reserve_elfcorehdr(u64 *start, u64 *end) 483int __init reserve_elfcorehdr(u64 *start, u64 *end)
502{ 484{
503 u64 length; 485 u64 length;
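[The deleted parser is not lost; as the removed comment hints, the elfcorehdr= handling was consolidated into generic kernel code so each architecture no longer carries its own copy. For reference, the early_param()/memparse() pattern it used, with hypothetical names:

        #include <linux/kernel.h>
        #include <linux/init.h>

        static unsigned long example_addr __initdata;

        static int __init parse_example(char *arg)
        {
                if (!arg)
                        return -EINVAL;
                /* memparse() understands K/M/G suffixes, e.g. "16M" */
                example_addr = memparse(arg, &arg);
                return 0;
        }
        early_param("example", parse_example);
]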
@@ -594,10 +576,6 @@ setup_arch (char **cmdline_p)
594 cpu_init(); /* initialize the bootstrap CPU */ 576 cpu_init(); /* initialize the bootstrap CPU */
595 mmu_context_init(); /* initialize context_id bitmap */ 577 mmu_context_init(); /* initialize context_id bitmap */
596 578
597#ifdef CONFIG_ACPI
598 acpi_boot_init();
599#endif
600
601 paravirt_banner(); 579 paravirt_banner();
602 paravirt_arch_setup_console(cmdline_p); 580 paravirt_arch_setup_console(cmdline_p);
603 581
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index dabeefe21134..be450a3e9871 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -293,6 +293,7 @@ smp_flush_tlb_all (void)
293void 293void
294smp_flush_tlb_mm (struct mm_struct *mm) 294smp_flush_tlb_mm (struct mm_struct *mm)
295{ 295{
296 cpumask_var_t cpus;
296 preempt_disable(); 297 preempt_disable();
297 /* this happens for the common case of a single-threaded fork(): */ 298 /* this happens for the common case of a single-threaded fork(): */
298 if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) 299 if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@ smp_flush_tlb_mm (struct mm_struct *mm)
301 preempt_enable(); 302 preempt_enable();
302 return; 303 return;
303 } 304 }
304 305 if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
305 smp_call_function_many(mm_cpumask(mm), 306 smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
306 (void (*)(void *))local_finish_flush_tlb_mm, mm, 1); 307 mm, 1);
308 } else {
309 cpumask_copy(cpus, mm_cpumask(mm));
310 smp_call_function_many(cpus,
311 (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
312 free_cpumask_var(cpus);
313 }
307 local_irq_disable(); 314 local_irq_disable();
308 local_finish_flush_tlb_mm(mm); 315 local_finish_flush_tlb_mm(mm);
309 local_irq_enable(); 316 local_irq_enable();
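[The new smp_flush_tlb_mm() copies mm_cpumask() into a private cpumask before sending IPIs, presumably so the mask stays stable while smp_call_function_many() walks it even if CPUs join or leave mm concurrently; if the atomic allocation fails it falls back to interrupting every other CPU, which is safe if wasteful. The pattern in isolation, as a generic sketch:

        #include <linux/cpumask.h>
        #include <linux/smp.h>
        #include <linux/gfp.h>

        /* run func(info) on the CPUs in *src, tolerating allocation failure */
        static void call_on_mask(const struct cpumask *src,
                                 void (*func)(void *), void *info)
        {
                cpumask_var_t cpus;

                if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
                        smp_call_function(func, info, 1); /* all other CPUs */
                        return;
                }
                cpumask_copy(cpus, src);  /* stable snapshot for the IPI loop */
                smp_call_function_many(cpus, func, info, 1);
                free_cpumask_var(cpus);
        }
]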
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d003b502a432..14ec641003da 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -677,7 +677,7 @@ extern void fixup_irqs(void);
677int migrate_platform_irqs(unsigned int cpu) 677int migrate_platform_irqs(unsigned int cpu)
678{ 678{
679 int new_cpei_cpu; 679 int new_cpei_cpu;
680 struct irq_desc *desc = NULL; 680 struct irq_data *data = NULL;
681 const struct cpumask *mask; 681 const struct cpumask *mask;
682 int retval = 0; 682 int retval = 0;
683 683
@@ -693,20 +693,20 @@ int migrate_platform_irqs(unsigned int cpu)
693 new_cpei_cpu = any_online_cpu(cpu_online_map); 693 new_cpei_cpu = any_online_cpu(cpu_online_map);
694 mask = cpumask_of(new_cpei_cpu); 694 mask = cpumask_of(new_cpei_cpu);
695 set_cpei_target_cpu(new_cpei_cpu); 695 set_cpei_target_cpu(new_cpei_cpu);
696 desc = irq_desc + ia64_cpe_irq; 696 data = irq_get_irq_data(ia64_cpe_irq);
697 /* 697 /*
698 * Switch for now, immediately, we need to do fake intr 698 * Switch for now, immediately, we need to do fake intr
699 * as other interrupts, but need to study CPEI behaviour with 699 * as other interrupts, but need to study CPEI behaviour with
700 * polling before making changes. 700 * polling before making changes.
701 */ 701 */
702 if (desc) { 702 if (data && data->chip) {
703 desc->chip->disable(ia64_cpe_irq); 703 data->chip->irq_disable(data);
704 desc->chip->set_affinity(ia64_cpe_irq, mask); 704 data->chip->irq_set_affinity(data, mask, false);
705 desc->chip->enable(ia64_cpe_irq); 705 data->chip->irq_enable(data);
706 printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); 706 printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
707 } 707 }
708 } 708 }
709 if (!desc) { 709 if (!data) {
710 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); 710 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
711 retval = -EBUSY; 711 retval = -EBUSY;
712 } 712 }
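[This hunk is the genirq conversion: chip methods moved from taking a bare irq number (desc->chip->disable(irq)) to taking struct irq_data, which bundles the irq number, chip, and per-irq cookie. A sketch of the new-style chip side, with hypothetical names:

        #include <linux/kernel.h>
        #include <linux/irq.h>

        /* old style was: static void my_disable(unsigned int irq);
         * new style gets everything it needs in one descriptor: */
        static void my_irq_disable(struct irq_data *d)
        {
                pr_info("disabling irq %u\n", d->irq);
        }

        static struct irq_chip my_chip = {
                .name        = "example",
                .irq_disable = my_irq_disable,
        };
]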
diff --git a/arch/ia64/kernel/stacktrace.c b/arch/ia64/kernel/stacktrace.c
new file mode 100644
index 000000000000..5af2783a87f4
--- /dev/null
+++ b/arch/ia64/kernel/stacktrace.c
@@ -0,0 +1,39 @@
1/*
2 * arch/ia64/kernel/stacktrace.c
3 *
4 * Stack trace management functions
5 *
6 */
7#include <linux/sched.h>
8#include <linux/stacktrace.h>
9#include <linux/module.h>
10
11static void
12ia64_do_save_stack(struct unw_frame_info *info, void *arg)
13{
14 struct stack_trace *trace = arg;
15 unsigned long ip;
16 int skip = trace->skip;
17
18 trace->nr_entries = 0;
19 do {
20 unw_get_ip(info, &ip);
21 if (ip == 0)
22 break;
23 if (skip == 0) {
24 trace->entries[trace->nr_entries++] = ip;
25 if (trace->nr_entries == trace->max_entries)
26 break;
27 } else
28 skip--;
29 } while (unw_unwind(info) >= 0);
30}
31
32/*
33 * Save stack-backtrace addresses into a stack_trace buffer.
34 */
35void save_stack_trace(struct stack_trace *trace)
36{
37 unw_init_running(ia64_do_save_stack, trace);
38}
39EXPORT_SYMBOL(save_stack_trace);
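[A hypothetical caller, to show how the new file plugs into the generic stacktrace API: the caller supplies the entry buffer, and skip drops the innermost frames (here, the capture helper itself):

        #include <linux/stacktrace.h>
        #include <linux/kernel.h>

        static void example_dump_stack(void)
        {
                unsigned long entries[16];
                struct stack_trace trace = {
                        .entries     = entries,
                        .max_entries = ARRAY_SIZE(entries),
                        .skip        = 1,  /* hide example_dump_stack itself */
                };

                save_stack_trace(&trace);
                print_stack_trace(&trace, 0);  /* 0 = no extra indentation */
        }
]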
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed6f22eb5b12..85118dfe9bb5 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -36,7 +36,7 @@
36static cycle_t itc_get_cycles(struct clocksource *cs); 36static cycle_t itc_get_cycles(struct clocksource *cs);
37 37
38struct fsyscall_gtod_data_t fsyscall_gtod_data = { 38struct fsyscall_gtod_data_t fsyscall_gtod_data = {
39 .lock = SEQLOCK_UNLOCKED, 39 .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
40}; 40};
41 41
42struct itc_jitter_data_t itc_jitter_data; 42struct itc_jitter_data_t itc_jitter_data;
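[The bare SEQLOCK_UNLOCKED initializer was being phased out; __SEQLOCK_UNLOCKED(name) takes the variable's own name so the embedded spinlock gets a meaningful lockdep class. For context, the reader side of such a lock, as a sketch:

        #include <linux/seqlock.h>

        static seqlock_t example_lock = __SEQLOCK_UNLOCKED(example_lock);
        static unsigned long example_value;

        static unsigned long read_example(void)
        {
                unsigned long seq, v;

                do {    /* retry if a writer raced with us */
                        seq = read_seqbegin(&example_lock);
                        v = example_value;
                } while (read_seqretry(&example_lock, seq));
                return v;
        }
]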
@@ -73,8 +73,6 @@ static struct clocksource clocksource_itc = {
73 .rating = 350, 73 .rating = 350,
74 .read = itc_get_cycles, 74 .read = itc_get_cycles,
75 .mask = CLOCKSOURCE_MASK(64), 75 .mask = CLOCKSOURCE_MASK(64),
76 .mult = 0, /*to be calculated*/
77 .shift = 16,
78 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 76 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
79#ifdef CONFIG_PARAVIRT 77#ifdef CONFIG_PARAVIRT
80 .resume = paravirt_clocksource_resume, 78 .resume = paravirt_clocksource_resume,
@@ -168,7 +166,7 @@ timer_interrupt (int irq, void *dev_id)
168{ 166{
169 unsigned long new_itm; 167 unsigned long new_itm;
170 168
171 if (unlikely(cpu_is_offline(smp_processor_id()))) { 169 if (cpu_is_offline(smp_processor_id())) {
172 return IRQ_HANDLED; 170 return IRQ_HANDLED;
173 } 171 }
174 172
@@ -190,19 +188,10 @@ timer_interrupt (int irq, void *dev_id)
190 188
191 new_itm += local_cpu_data->itm_delta; 189 new_itm += local_cpu_data->itm_delta;
192 190
193 if (smp_processor_id() == time_keeper_id) { 191 if (smp_processor_id() == time_keeper_id)
194 /* 192 xtime_update(1);
195 * Here we are in the timer irq handler. We have irqs locally 193
196 * disabled, but we don't know if the timer_bh is running on 194 local_cpu_data->itm_next = new_itm;
197 * another CPU. We need to avoid to SMP race by acquiring the
198 * xtime_lock.
199 */
200 write_seqlock(&xtime_lock);
201 do_timer(1);
202 local_cpu_data->itm_next = new_itm;
203 write_sequnlock(&xtime_lock);
204 } else
205 local_cpu_data->itm_next = new_itm;
206 195
207 if (time_after(new_itm, ia64_get_itc())) 196 if (time_after(new_itm, ia64_get_itc()))
208 break; 197 break;
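[xtime_update() is the generic helper that replaced the open-coded seqlock dance, so the handler keeps the same semantics with the locking centralized in one place. Paraphrasing what the helper amounted to in the generic timer code at the time:

        /* sketch of the generic helper (kernel/time/timekeeping.c) */
        void xtime_update(unsigned long ticks)
        {
                write_seqlock(&xtime_lock);
                do_timer(ticks);
                write_sequnlock(&xtime_lock);
        }
]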
@@ -222,7 +211,7 @@ skip_process_time_accounting:
222 * comfort, we increase the safety margin by 211 * comfort, we increase the safety margin by
223 * intentionally dropping the next tick(s). We do NOT 212 * intentionally dropping the next tick(s). We do NOT
224 * update itm.next because that would force us to call 213 * update itm.next because that would force us to call
225 * do_timer() which in turn would let our clock run 214 * xtime_update() which in turn would let our clock run
226 * too fast (with the potentially devastating effect 215 * too fast (with the potentially devastating effect
227 * of losing monotony of time). 216 * of losing monotony of time).
228 */ 217 */
@@ -374,11 +363,8 @@ ia64_init_itm (void)
374 ia64_cpu_local_tick(); 363 ia64_cpu_local_tick();
375 364
376 if (!itc_clocksource) { 365 if (!itc_clocksource) {
377 /* Sort out mult/shift values: */ 366 clocksource_register_hz(&clocksource_itc,
378 clocksource_itc.mult = 367 local_cpu_data->itc_freq);
379 clocksource_hz2mult(local_cpu_data->itc_freq,
380 clocksource_itc.shift);
381 clocksource_register(&clocksource_itc);
382 itc_clocksource = &clocksource_itc; 368 itc_clocksource = &clocksource_itc;
383 } 369 }
384} 370}
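[clocksource_register_hz() subsumes the removed mult/shift bookkeeping: the core now derives a mult/shift pair from the hardware frequency itself, rather than trusting the driver's hand-picked shift of 16. Before and after, schematically:

        #include <linux/clocksource.h>

        static void register_example(struct clocksource *cs, u32 hz)
        {
                /* before: the driver chose .shift and derived .mult itself:
                 *     cs->mult = clocksource_hz2mult(hz, cs->shift);
                 *     clocksource_register(cs);
                 * after: one call, the core computes both for best precision */
                clocksource_register_hz(cs, hz);
        }
]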
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 0baa1bbb65fe..0e0e0cc9e392 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -43,7 +43,7 @@ int __ref arch_register_cpu(int num)
43{ 43{
44#ifdef CONFIG_ACPI 44#ifdef CONFIG_ACPI
45 /* 45 /*
46 * If CPEI can be re-targetted or if this is not 46 * If CPEI can be re-targeted or if this is not
47 * CPEI target, then it is hotpluggable 47 * CPEI target, then it is hotpluggable
48 */ 48 */
49 if (can_cpei_retarget() || !is_cpu_cpei_target(num)) 49 if (can_cpei_retarget() || !is_cpu_cpei_target(num))
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index b6c0e63a0bf6..fed6afa2e8a9 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1204,10 +1204,10 @@ desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word
1204static inline unw_hash_index_t 1204static inline unw_hash_index_t
1205hash (unsigned long ip) 1205hash (unsigned long ip)
1206{ 1206{
1207# define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */ 1207 /* magic number = ((sqrt(5)-1)/2)*2^64 */
1208 static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
1208 1209
1209 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE); 1210 return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1210#undef hashmagic
1211} 1211}
1212 1212
1213static inline long 1213static inline long
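[The constant is the golden-ratio multiplier for Fibonacci hashing (2^64 scaled by (sqrt(5)-1)/2, a formula the rewritten comment also corrects); multiplying and keeping only the top UNW_LOG_HASH_SIZE bits scatters nearby IPs across the table, and ip is pre-shifted by 4 because ia64 bundles are 16 bytes. A standalone demo with a hypothetical 256-bucket table, assuming 64-bit long:

        #include <stdio.h>

        #define LOG_HASH_SIZE 8 /* hypothetical: 256 buckets */

        static unsigned long hash_ip(unsigned long ip)
        {
                /* ((sqrt(5)-1)/2) * 2^64, the Fibonacci hashing multiplier */
                static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;

                return (ip >> 4) * hashmagic >> (64 - LOG_HASH_SIZE);
        }

        int main(void)
        {
                unsigned long ip;

                for (ip = 0xa000000000010000UL; ip < 0xa000000000010080UL; ip += 16)
                        printf("%#018lx -> bucket %3lu\n", ip, hash_ip(ip));
                return 0;
        }
]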
@@ -1531,7 +1531,7 @@ build_script (struct unw_frame_info *info)
1531 struct unw_labeled_state *ls, *next; 1531 struct unw_labeled_state *ls, *next;
1532 unsigned long ip = info->ip; 1532 unsigned long ip = info->ip;
1533 struct unw_state_record sr; 1533 struct unw_state_record sr;
1534 struct unw_table *table; 1534 struct unw_table *table, *prev;
1535 struct unw_reg_info *r; 1535 struct unw_reg_info *r;
1536 struct unw_insn insn; 1536 struct unw_insn insn;
1537 u8 *dp, *desc_end; 1537 u8 *dp, *desc_end;
@@ -1560,11 +1560,26 @@ build_script (struct unw_frame_info *info)
1560 1560
1561 STAT(parse_start = ia64_get_itc()); 1561 STAT(parse_start = ia64_get_itc());
1562 1562
1563 prev = NULL;
1563 for (table = unw.tables; table; table = table->next) { 1564 for (table = unw.tables; table; table = table->next) {
1564 if (ip >= table->start && ip < table->end) { 1565 if (ip >= table->start && ip < table->end) {
1566 /*
1567 * Leave the kernel unwind table at the very front,
1568 * lest moving it breaks some assumption elsewhere.
1569 * Otherwise, move the matching table to the second
1570 * position in the list so that traversals can benefit
1571 * from commonality in backtrace paths.
1572 */
1573 if (prev && prev != unw.tables) {
1574 /* unw is safe - we're already spinlocked */
1575 prev->next = table->next;
1576 table->next = unw.tables->next;
1577 unw.tables->next = table;
1578 }
1565 e = lookup(table, ip - table->segment_base); 1579 e = lookup(table, ip - table->segment_base);
1566 break; 1580 break;
1567 } 1581 }
1582 prev = table;
1568 } 1583 }
1569 if (!e) { 1584 if (!e) {
1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ 1585 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
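[The loop above turns the unwind table list into a self-organizing list with a pinned head: a table found deep in the list is promoted to second position (the kernel's own table stays first, as the new comment explains), so clustered backtraces walk fewer nodes. The splice in isolation, with a hypothetical node type:

        struct table {
                struct table *next;
        };

        /* promote hit (whose predecessor is prev) to the slot after head;
         * callers must hold whatever lock protects the list */
        static void promote_to_second(struct table *head, struct table *prev,
                                      struct table *hit)
        {
                if (!prev || prev == head)
                        return;                 /* already first or second */
                prev->next = hit->next;         /* unlink */
                hit->next = head->next;         /* splice in after head */
                head->next = hit;
        }
]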
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..53c0ba004e9e 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
198 198
199 /* Per-cpu data: */ 199 /* Per-cpu data: */
200 . = ALIGN(PERCPU_PAGE_SIZE); 200 . = ALIGN(PERCPU_PAGE_SIZE);
201 PERCPU_VADDR(PERCPU_ADDR, :percpu) 201 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
202 __phys_per_cpu_start = __per_cpu_load; 202 __phys_per_cpu_start = __per_cpu_load;
203 /* 203 /*
204 * ensure percpu data fits 204 * ensure percpu data fits
@@ -209,6 +209,7 @@ SECTIONS {
209 data : { 209 data : {
210 } :data 210 } :data
211 .data : AT(ADDR(.data) - LOAD_OFFSET) { 211 .data : AT(ADDR(.data) - LOAD_OFFSET) {
212 _sdata = .;
212 INIT_TASK_DATA(PAGE_SIZE) 213 INIT_TASK_DATA(PAGE_SIZE)
213 CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) 214 CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
214 READ_MOSTLY_DATA(SMP_CACHE_BYTES) 215 READ_MOSTLY_DATA(SMP_CACHE_BYTES)