aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2011-05-14 06:06:36 -0400
committerThomas Gleixner <tglx@linutronix.de>2011-05-14 06:06:36 -0400
commita18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
treea7d56d88fad5e444d7661484109758a2f436129e /arch/ia64/kernel
parenta1c57e0fec53defe745e64417eacdbd3618c3e66 (diff)
parent798778b8653f64b7b2162ac70eca10367cff6ce8 (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts: arch/ia64/kernel/cyclone.c arch/mips/kernel/i8253.c arch/x86/kernel/i8253.c Reason: Resolve conflicts so further cleanups do not conflict further Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--arch/ia64/kernel/acpi.c23
-rw-r--r--arch/ia64/kernel/crash_dump.c3
-rw-r--r--arch/ia64/kernel/efi.c1
-rw-r--r--arch/ia64/kernel/entry.S4
-rw-r--r--arch/ia64/kernel/iosapic.c119
-rw-r--r--arch/ia64/kernel/irq.c73
-rw-r--r--arch/ia64/kernel/irq_ia64.c10
-rw-r--r--arch/ia64/kernel/irq_lsapic.c23
-rw-r--r--arch/ia64/kernel/mca.c9
-rw-r--r--arch/ia64/kernel/msi_ia64.c49
-rw-r--r--arch/ia64/kernel/perfmon_default_smpl.c2
-rw-r--r--arch/ia64/kernel/setup.c18
-rw-r--r--arch/ia64/kernel/smpboot.c16
-rw-r--r--arch/ia64/kernel/time.c19
-rw-r--r--arch/ia64/kernel/topology.c2
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S2
16 files changed, 149 insertions, 224 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 90ebceb899a0..3be485a300b1 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -803,7 +803,7 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
803 * ACPI based hotplug CPU support 803 * ACPI based hotplug CPU support
804 */ 804 */
805#ifdef CONFIG_ACPI_HOTPLUG_CPU 805#ifdef CONFIG_ACPI_HOTPLUG_CPU
806static 806static __cpuinit
807int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 807int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
808{ 808{
809#ifdef CONFIG_ACPI_NUMA 809#ifdef CONFIG_ACPI_NUMA
@@ -878,7 +878,7 @@ __init void prefill_possible_map(void)
878 set_cpu_possible(i, true); 878 set_cpu_possible(i, true);
879} 879}
880 880
881int acpi_map_lsapic(acpi_handle handle, int *pcpu) 881static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
882{ 882{
883 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 883 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
884 union acpi_object *obj; 884 union acpi_object *obj;
@@ -929,6 +929,11 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
929 return (0); 929 return (0);
930} 930}
931 931
932/* wrapper to silence section mismatch warning */
933int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
934{
935 return _acpi_map_lsapic(handle, pcpu);
936}
932EXPORT_SYMBOL(acpi_map_lsapic); 937EXPORT_SYMBOL(acpi_map_lsapic);
933 938
934int acpi_unmap_lsapic(int cpu) 939int acpi_unmap_lsapic(int cpu)
@@ -1034,18 +1039,8 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
1034EXPORT_SYMBOL(acpi_unregister_ioapic); 1039EXPORT_SYMBOL(acpi_unregister_ioapic);
1035 1040
1036/* 1041/*
1037 * acpi_save_state_mem() - save kernel state 1042 * acpi_suspend_lowlevel() - save kernel state and suspend.
1038 * 1043 *
1039 * TBD when IA64 starts to support suspend... 1044 * TBD when IA64 starts to support suspend...
1040 */ 1045 */
1041int acpi_save_state_mem(void) { return 0; } 1046int acpi_suspend_lowlevel(void) { return 0; }
1042
1043/*
1044 * acpi_restore_state()
1045 */
1046void acpi_restore_state_mem(void) {}
1047
1048/*
1049 * do_suspend_lowlevel()
1050 */
1051void do_suspend_lowlevel(void) {}
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 23e91290e41f..c8c9298666fb 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -13,9 +13,6 @@
13#include <asm/page.h> 13#include <asm/page.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15 15
16/* Stores the physical address of elf header of crash image. */
17unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
18
19/** 16/**
20 * copy_oldmem_page - copy one page from "oldmem" 17 * copy_oldmem_page - copy one page from "oldmem"
21 * @pfn: page frame number to be copied 18 * @pfn: page frame number to be copied
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a0f001928502..6fc03aff046c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -23,6 +23,7 @@
23 */ 23 */
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26#include <linux/crash_dump.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/init.h> 28#include <linux/init.h>
28#include <linux/types.h> 29#include <linux/types.h>
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 244704a174de..6de2e23b3636 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,6 +1771,10 @@ sys_call_table:
1771 data8 sys_fanotify_init 1771 data8 sys_fanotify_init
1772 data8 sys_fanotify_mark 1772 data8 sys_fanotify_mark
1773 data8 sys_prlimit64 // 1325 1773 data8 sys_prlimit64 // 1325
1774 data8 sys_name_to_handle_at
1775 data8 sys_open_by_handle_at
1776 data8 sys_clock_adjtime
1777 data8 sys_syncfs
1774 1778
1775 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1779 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1776#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1780#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 22c38404f539..b0f9afebb146 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -257,7 +257,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
257} 257}
258 258
259static void 259static void
260nop (unsigned int irq) 260nop (struct irq_data *data)
261{ 261{
262 /* do nothing... */ 262 /* do nothing... */
263} 263}
@@ -287,8 +287,9 @@ kexec_disable_iosapic(void)
287#endif 287#endif
288 288
289static void 289static void
290mask_irq (unsigned int irq) 290mask_irq (struct irq_data *data)
291{ 291{
292 unsigned int irq = data->irq;
292 u32 low32; 293 u32 low32;
293 int rte_index; 294 int rte_index;
294 struct iosapic_rte_info *rte; 295 struct iosapic_rte_info *rte;
@@ -305,8 +306,9 @@ mask_irq (unsigned int irq)
305} 306}
306 307
307static void 308static void
308unmask_irq (unsigned int irq) 309unmask_irq (struct irq_data *data)
309{ 310{
311 unsigned int irq = data->irq;
310 u32 low32; 312 u32 low32;
311 int rte_index; 313 int rte_index;
312 struct iosapic_rte_info *rte; 314 struct iosapic_rte_info *rte;
@@ -323,9 +325,11 @@ unmask_irq (unsigned int irq)
323 325
324 326
325static int 327static int
326iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) 328iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
329 bool force)
327{ 330{
328#ifdef CONFIG_SMP 331#ifdef CONFIG_SMP
332 unsigned int irq = data->irq;
329 u32 high32, low32; 333 u32 high32, low32;
330 int cpu, dest, rte_index; 334 int cpu, dest, rte_index;
331 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; 335 int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
@@ -379,32 +383,33 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
379 */ 383 */
380 384
381static unsigned int 385static unsigned int
382iosapic_startup_level_irq (unsigned int irq) 386iosapic_startup_level_irq (struct irq_data *data)
383{ 387{
384 unmask_irq(irq); 388 unmask_irq(data);
385 return 0; 389 return 0;
386} 390}
387 391
388static void 392static void
389iosapic_unmask_level_irq (unsigned int irq) 393iosapic_unmask_level_irq (struct irq_data *data)
390{ 394{
395 unsigned int irq = data->irq;
391 ia64_vector vec = irq_to_vector(irq); 396 ia64_vector vec = irq_to_vector(irq);
392 struct iosapic_rte_info *rte; 397 struct iosapic_rte_info *rte;
393 int do_unmask_irq = 0; 398 int do_unmask_irq = 0;
394 399
395 irq_complete_move(irq); 400 irq_complete_move(irq);
396 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { 401 if (unlikely(irqd_is_setaffinity_pending(data))) {
397 do_unmask_irq = 1; 402 do_unmask_irq = 1;
398 mask_irq(irq); 403 mask_irq(data);
399 } else 404 } else
400 unmask_irq(irq); 405 unmask_irq(data);
401 406
402 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) 407 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
403 iosapic_eoi(rte->iosapic->addr, vec); 408 iosapic_eoi(rte->iosapic->addr, vec);
404 409
405 if (unlikely(do_unmask_irq)) { 410 if (unlikely(do_unmask_irq)) {
406 move_masked_irq(irq); 411 irq_move_masked_irq(data);
407 unmask_irq(irq); 412 unmask_irq(data);
408 } 413 }
409} 414}
410 415
@@ -414,15 +419,15 @@ iosapic_unmask_level_irq (unsigned int irq)
414#define iosapic_ack_level_irq nop 419#define iosapic_ack_level_irq nop
415 420
416static struct irq_chip irq_type_iosapic_level = { 421static struct irq_chip irq_type_iosapic_level = {
417 .name = "IO-SAPIC-level", 422 .name = "IO-SAPIC-level",
418 .startup = iosapic_startup_level_irq, 423 .irq_startup = iosapic_startup_level_irq,
419 .shutdown = iosapic_shutdown_level_irq, 424 .irq_shutdown = iosapic_shutdown_level_irq,
420 .enable = iosapic_enable_level_irq, 425 .irq_enable = iosapic_enable_level_irq,
421 .disable = iosapic_disable_level_irq, 426 .irq_disable = iosapic_disable_level_irq,
422 .ack = iosapic_ack_level_irq, 427 .irq_ack = iosapic_ack_level_irq,
423 .mask = mask_irq, 428 .irq_mask = mask_irq,
424 .unmask = iosapic_unmask_level_irq, 429 .irq_unmask = iosapic_unmask_level_irq,
425 .set_affinity = iosapic_set_affinity 430 .irq_set_affinity = iosapic_set_affinity
426}; 431};
427 432
428/* 433/*
@@ -430,9 +435,9 @@ static struct irq_chip irq_type_iosapic_level = {
430 */ 435 */
431 436
432static unsigned int 437static unsigned int
433iosapic_startup_edge_irq (unsigned int irq) 438iosapic_startup_edge_irq (struct irq_data *data)
434{ 439{
435 unmask_irq(irq); 440 unmask_irq(data);
436 /* 441 /*
437 * IOSAPIC simply drops interrupts pended while the 442 * IOSAPIC simply drops interrupts pended while the
438 * corresponding pin was masked, so we can't know if an 443 * corresponding pin was masked, so we can't know if an
@@ -442,37 +447,25 @@ iosapic_startup_edge_irq (unsigned int irq)
442} 447}
443 448
444static void 449static void
445iosapic_ack_edge_irq (unsigned int irq) 450iosapic_ack_edge_irq (struct irq_data *data)
446{ 451{
447 struct irq_desc *idesc = irq_desc + irq; 452 irq_complete_move(data->irq);
448 453 irq_move_irq(data);
449 irq_complete_move(irq);
450 move_native_irq(irq);
451 /*
452 * Once we have recorded IRQ_PENDING already, we can mask the
453 * interrupt for real. This prevents IRQ storms from unhandled
454 * devices.
455 */
456 if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
457 (IRQ_PENDING|IRQ_DISABLED))
458 mask_irq(irq);
459} 454}
460 455
461#define iosapic_enable_edge_irq unmask_irq 456#define iosapic_enable_edge_irq unmask_irq
462#define iosapic_disable_edge_irq nop 457#define iosapic_disable_edge_irq nop
463#define iosapic_end_edge_irq nop
464 458
465static struct irq_chip irq_type_iosapic_edge = { 459static struct irq_chip irq_type_iosapic_edge = {
466 .name = "IO-SAPIC-edge", 460 .name = "IO-SAPIC-edge",
467 .startup = iosapic_startup_edge_irq, 461 .irq_startup = iosapic_startup_edge_irq,
468 .shutdown = iosapic_disable_edge_irq, 462 .irq_shutdown = iosapic_disable_edge_irq,
469 .enable = iosapic_enable_edge_irq, 463 .irq_enable = iosapic_enable_edge_irq,
470 .disable = iosapic_disable_edge_irq, 464 .irq_disable = iosapic_disable_edge_irq,
471 .ack = iosapic_ack_edge_irq, 465 .irq_ack = iosapic_ack_edge_irq,
472 .end = iosapic_end_edge_irq, 466 .irq_mask = mask_irq,
473 .mask = mask_irq, 467 .irq_unmask = unmask_irq,
474 .unmask = unmask_irq, 468 .irq_set_affinity = iosapic_set_affinity
475 .set_affinity = iosapic_set_affinity
476}; 469};
477 470
478static unsigned int 471static unsigned int
@@ -562,8 +555,7 @@ static int
562register_intr (unsigned int gsi, int irq, unsigned char delivery, 555register_intr (unsigned int gsi, int irq, unsigned char delivery,
563 unsigned long polarity, unsigned long trigger) 556 unsigned long polarity, unsigned long trigger)
564{ 557{
565 struct irq_desc *idesc; 558 struct irq_chip *chip, *irq_type;
566 struct irq_chip *irq_type;
567 int index; 559 int index;
568 struct iosapic_rte_info *rte; 560 struct iosapic_rte_info *rte;
569 561
@@ -610,19 +602,18 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
610 602
611 irq_type = iosapic_get_irq_chip(trigger); 603 irq_type = iosapic_get_irq_chip(trigger);
612 604
613 idesc = irq_desc + irq; 605 chip = irq_get_chip(irq);
614 if (irq_type != NULL && idesc->chip != irq_type) { 606 if (irq_type != NULL && chip != irq_type) {
615 if (idesc->chip != &no_irq_chip) 607 if (chip != &no_irq_chip)
616 printk(KERN_WARNING 608 printk(KERN_WARNING
617 "%s: changing vector %d from %s to %s\n", 609 "%s: changing vector %d from %s to %s\n",
618 __func__, irq_to_vector(irq), 610 __func__, irq_to_vector(irq),
619 idesc->chip->name, irq_type->name); 611 chip->name, irq_type->name);
620 idesc->chip = irq_type; 612 chip = irq_type;
621 } 613 }
622 if (trigger == IOSAPIC_EDGE) 614 __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
623 __set_irq_handler_unlocked(irq, handle_edge_irq); 615 handle_edge_irq : handle_level_irq,
624 else 616 NULL);
625 __set_irq_handler_unlocked(irq, handle_level_irq);
626 return 0; 617 return 0;
627} 618}
628 619
@@ -732,6 +723,7 @@ iosapic_register_intr (unsigned int gsi,
732 struct iosapic_rte_info *rte; 723 struct iosapic_rte_info *rte;
733 u32 low32; 724 u32 low32;
734 unsigned char dmode; 725 unsigned char dmode;
726 struct irq_desc *desc;
735 727
736 /* 728 /*
737 * If this GSI has already been registered (i.e., it's a 729 * If this GSI has already been registered (i.e., it's a
@@ -759,12 +751,13 @@ iosapic_register_intr (unsigned int gsi,
759 goto unlock_iosapic_lock; 751 goto unlock_iosapic_lock;
760 } 752 }
761 753
762 raw_spin_lock(&irq_desc[irq].lock); 754 desc = irq_to_desc(irq);
755 raw_spin_lock(&desc->lock);
763 dest = get_target_cpu(gsi, irq); 756 dest = get_target_cpu(gsi, irq);
764 dmode = choose_dmode(); 757 dmode = choose_dmode();
765 err = register_intr(gsi, irq, dmode, polarity, trigger); 758 err = register_intr(gsi, irq, dmode, polarity, trigger);
766 if (err < 0) { 759 if (err < 0) {
767 raw_spin_unlock(&irq_desc[irq].lock); 760 raw_spin_unlock(&desc->lock);
768 irq = err; 761 irq = err;
769 goto unlock_iosapic_lock; 762 goto unlock_iosapic_lock;
770 } 763 }
@@ -783,7 +776,7 @@ iosapic_register_intr (unsigned int gsi,
783 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 776 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
784 cpu_logical_id(dest), dest, irq_to_vector(irq)); 777 cpu_logical_id(dest), dest, irq_to_vector(irq));
785 778
786 raw_spin_unlock(&irq_desc[irq].lock); 779 raw_spin_unlock(&desc->lock);
787 unlock_iosapic_lock: 780 unlock_iosapic_lock:
788 spin_unlock_irqrestore(&iosapic_lock, flags); 781 spin_unlock_irqrestore(&iosapic_lock, flags);
789 return irq; 782 return irq;
@@ -794,7 +787,6 @@ iosapic_unregister_intr (unsigned int gsi)
794{ 787{
795 unsigned long flags; 788 unsigned long flags;
796 int irq, index; 789 int irq, index;
797 struct irq_desc *idesc;
798 u32 low32; 790 u32 low32;
799 unsigned long trigger, polarity; 791 unsigned long trigger, polarity;
800 unsigned int dest; 792 unsigned int dest;
@@ -824,7 +816,6 @@ iosapic_unregister_intr (unsigned int gsi)
824 if (--rte->refcnt > 0) 816 if (--rte->refcnt > 0)
825 goto out; 817 goto out;
826 818
827 idesc = irq_desc + irq;
828 rte->refcnt = NO_REF_RTE; 819 rte->refcnt = NO_REF_RTE;
829 820
830 /* Mask the interrupt */ 821 /* Mask the interrupt */
@@ -848,7 +839,7 @@ iosapic_unregister_intr (unsigned int gsi)
848 if (iosapic_intr_info[irq].count == 0) { 839 if (iosapic_intr_info[irq].count == 0) {
849#ifdef CONFIG_SMP 840#ifdef CONFIG_SMP
850 /* Clear affinity */ 841 /* Clear affinity */
851 cpumask_setall(idesc->affinity); 842 cpumask_setall(irq_get_irq_data(irq)->affinity);
852#endif 843#endif
853 /* Clear the interrupt information */ 844 /* Clear the interrupt information */
854 iosapic_intr_info[irq].dest = 0; 845 iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 94ee9d067cbd..ad69606613eb 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -53,47 +53,9 @@ atomic_t irq_err_count;
53/* 53/*
54 * /proc/interrupts printing: 54 * /proc/interrupts printing:
55 */ 55 */
56 56int arch_show_interrupts(struct seq_file *p, int prec)
57int show_interrupts(struct seq_file *p, void *v)
58{ 57{
59 int i = *(loff_t *) v, j; 58 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
60 struct irqaction * action;
61 unsigned long flags;
62
63 if (i == 0) {
64 char cpuname[16];
65 seq_printf(p, " ");
66 for_each_online_cpu(j) {
67 snprintf(cpuname, 10, "CPU%d", j);
68 seq_printf(p, "%10s ", cpuname);
69 }
70 seq_putc(p, '\n');
71 }
72
73 if (i < NR_IRQS) {
74 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
75 action = irq_desc[i].action;
76 if (!action)
77 goto skip;
78 seq_printf(p, "%3d: ",i);
79#ifndef CONFIG_SMP
80 seq_printf(p, "%10u ", kstat_irqs(i));
81#else
82 for_each_online_cpu(j) {
83 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
84 }
85#endif
86 seq_printf(p, " %14s", irq_desc[i].chip->name);
87 seq_printf(p, " %s", action->name);
88
89 for (action=action->next; action; action = action->next)
90 seq_printf(p, ", %s", action->name);
91
92 seq_putc(p, '\n');
93skip:
94 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
95 } else if (i == NR_IRQS)
96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
97 return 0; 59 return 0;
98} 60}
99 61
@@ -103,7 +65,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
103void set_irq_affinity_info (unsigned int irq, int hwid, int redir) 65void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
104{ 66{
105 if (irq < NR_IRQS) { 67 if (irq < NR_IRQS) {
106 cpumask_copy(irq_desc[irq].affinity, 68 cpumask_copy(irq_get_irq_data(irq)->affinity,
107 cpumask_of(cpu_logical_id(hwid))); 69 cpumask_of(cpu_logical_id(hwid)));
108 irq_redir[irq] = (char) (redir & 0xff); 70 irq_redir[irq] = (char) (redir & 0xff);
109 } 71 }
@@ -130,13 +92,14 @@ unsigned int vectors_in_migration[NR_IRQS];
130 */ 92 */
131static void migrate_irqs(void) 93static void migrate_irqs(void)
132{ 94{
133 struct irq_desc *desc;
134 int irq, new_cpu; 95 int irq, new_cpu;
135 96
136 for (irq=0; irq < NR_IRQS; irq++) { 97 for (irq=0; irq < NR_IRQS; irq++) {
137 desc = irq_desc + irq; 98 struct irq_desc *desc = irq_to_desc(irq);
99 struct irq_data *data = irq_desc_get_irq_data(desc);
100 struct irq_chip *chip = irq_data_get_irq_chip(data);
138 101
139 if (desc->status == IRQ_DISABLED) 102 if (irqd_irq_disabled(data))
140 continue; 103 continue;
141 104
142 /* 105 /*
@@ -145,10 +108,10 @@ static void migrate_irqs(void)
145 * tell CPU not to respond to these local intr sources. 108 * tell CPU not to respond to these local intr sources.
146 * such as ITV,CPEI,MCA etc. 109 * such as ITV,CPEI,MCA etc.
147 */ 110 */
148 if (desc->status == IRQ_PER_CPU) 111 if (irqd_is_per_cpu(data))
149 continue; 112 continue;
150 113
151 if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask) 114 if (cpumask_any_and(data->affinity, cpu_online_mask)
152 >= nr_cpu_ids) { 115 >= nr_cpu_ids) {
153 /* 116 /*
154 * Save it for phase 2 processing 117 * Save it for phase 2 processing
@@ -160,16 +123,16 @@ static void migrate_irqs(void)
160 /* 123 /*
161 * All three are essential, currently WARN_ON.. maybe panic? 124 * All three are essential, currently WARN_ON.. maybe panic?
162 */ 125 */
163 if (desc->chip && desc->chip->disable && 126 if (chip && chip->irq_disable &&
164 desc->chip->enable && desc->chip->set_affinity) { 127 chip->irq_enable && chip->irq_set_affinity) {
165 desc->chip->disable(irq); 128 chip->irq_disable(data);
166 desc->chip->set_affinity(irq, 129 chip->irq_set_affinity(data,
167 cpumask_of(new_cpu)); 130 cpumask_of(new_cpu), false);
168 desc->chip->enable(irq); 131 chip->irq_enable(data);
169 } else { 132 } else {
170 WARN_ON((!(desc->chip) || !(desc->chip->disable) || 133 WARN_ON((!chip || !chip->irq_disable ||
171 !(desc->chip->enable) || 134 !chip->irq_enable ||
172 !(desc->chip->set_affinity))); 135 !chip->irq_set_affinity));
173 } 136 }
174 } 137 }
175 } 138 }
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 38c07b866901..5b704740f160 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -343,7 +343,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
343 if (irq < 0) 343 if (irq < 0)
344 continue; 344 continue;
345 345
346 desc = irq_desc + irq; 346 desc = irq_to_desc(irq);
347 cfg = irq_cfg + irq; 347 cfg = irq_cfg + irq;
348 raw_spin_lock(&desc->lock); 348 raw_spin_lock(&desc->lock);
349 if (!cfg->move_cleanup_count) 349 if (!cfg->move_cleanup_count)
@@ -626,17 +626,15 @@ static struct irqaction tlb_irqaction = {
626void 626void
627ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action) 627ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
628{ 628{
629 struct irq_desc *desc;
630 unsigned int irq; 629 unsigned int irq;
631 630
632 irq = vec; 631 irq = vec;
633 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); 632 BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
634 desc = irq_desc + irq; 633 irq_set_status_flags(irq, IRQ_PER_CPU);
635 desc->status |= IRQ_PER_CPU; 634 irq_set_chip(irq, &irq_type_ia64_lsapic);
636 set_irq_chip(irq, &irq_type_ia64_lsapic);
637 if (action) 635 if (action)
638 setup_irq(irq, action); 636 setup_irq(irq, action);
639 set_irq_handler(irq, handle_percpu_irq); 637 irq_set_handler(irq, handle_percpu_irq);
640} 638}
641 639
642void __init 640void __init
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index fc1549d4564d..1b3a776e5161 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -15,31 +15,30 @@
15#include <linux/irq.h> 15#include <linux/irq.h>
16 16
17static unsigned int 17static unsigned int
18lsapic_noop_startup (unsigned int irq) 18lsapic_noop_startup (struct irq_data *data)
19{ 19{
20 return 0; 20 return 0;
21} 21}
22 22
23static void 23static void
24lsapic_noop (unsigned int irq) 24lsapic_noop (struct irq_data *data)
25{ 25{
26 /* nothing to do... */ 26 /* nothing to do... */
27} 27}
28 28
29static int lsapic_retrigger(unsigned int irq) 29static int lsapic_retrigger(struct irq_data *data)
30{ 30{
31 ia64_resend_irq(irq); 31 ia64_resend_irq(data->irq);
32 32
33 return 1; 33 return 1;
34} 34}
35 35
36struct irq_chip irq_type_ia64_lsapic = { 36struct irq_chip irq_type_ia64_lsapic = {
37 .name = "LSAPIC", 37 .name = "LSAPIC",
38 .startup = lsapic_noop_startup, 38 .irq_startup = lsapic_noop_startup,
39 .shutdown = lsapic_noop, 39 .irq_shutdown = lsapic_noop,
40 .enable = lsapic_noop, 40 .irq_enable = lsapic_noop,
41 .disable = lsapic_noop, 41 .irq_disable = lsapic_noop,
42 .ack = lsapic_noop, 42 .irq_ack = lsapic_noop,
43 .end = lsapic_noop, 43 .irq_retrigger = lsapic_retrigger,
44 .retrigger = lsapic_retrigger,
45}; 44};
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1753f6a30d55..84fb405eee87 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -582,6 +582,8 @@ out:
582 /* Get the CPE error record and log it */ 582 /* Get the CPE error record and log it */
583 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); 583 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
584 584
585 local_irq_disable();
586
585 return IRQ_HANDLED; 587 return IRQ_HANDLED;
586} 588}
587 589
@@ -1859,7 +1861,8 @@ ia64_mca_cpu_init(void *cpu_data)
1859 data = mca_bootmem(); 1861 data = mca_bootmem();
1860 first_time = 0; 1862 first_time = 0;
1861 } else 1863 } else
1862 data = __get_free_pages(GFP_KERNEL, get_order(sz)); 1864 data = (void *)__get_free_pages(GFP_KERNEL,
1865 get_order(sz));
1863 if (!data) 1866 if (!data)
1864 panic("Could not allocate MCA memory for cpu %d\n", 1867 panic("Could not allocate MCA memory for cpu %d\n",
1865 cpu); 1868 cpu);
@@ -2122,7 +2125,6 @@ ia64_mca_late_init(void)
2122 cpe_poll_timer.function = ia64_mca_cpe_poll; 2125 cpe_poll_timer.function = ia64_mca_cpe_poll;
2123 2126
2124 { 2127 {
2125 struct irq_desc *desc;
2126 unsigned int irq; 2128 unsigned int irq;
2127 2129
2128 if (cpe_vector >= 0) { 2130 if (cpe_vector >= 0) {
@@ -2130,8 +2132,7 @@ ia64_mca_late_init(void)
2130 irq = local_vector_to_irq(cpe_vector); 2132 irq = local_vector_to_irq(cpe_vector);
2131 if (irq > 0) { 2133 if (irq > 0) {
2132 cpe_poll_enabled = 0; 2134 cpe_poll_enabled = 0;
2133 desc = irq_desc + irq; 2135 irq_set_status_flags(irq, IRQ_PER_CPU);
2134 desc->status |= IRQ_PER_CPU;
2135 setup_irq(irq, &mca_cpe_irqaction); 2136 setup_irq(irq, &mca_cpe_irqaction);
2136 ia64_cpe_irq = irq; 2137 ia64_cpe_irq = irq;
2137 ia64_mca_register_cpev(cpe_vector); 2138 ia64_mca_register_cpev(cpe_vector);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 00b19a416eab..009df5434a7a 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,12 +12,13 @@
12static struct irq_chip ia64_msi_chip; 12static struct irq_chip ia64_msi_chip;
13 13
14#ifdef CONFIG_SMP 14#ifdef CONFIG_SMP
15static int ia64_set_msi_irq_affinity(unsigned int irq, 15static int ia64_set_msi_irq_affinity(struct irq_data *idata,
16 const cpumask_t *cpu_mask) 16 const cpumask_t *cpu_mask, bool force)
17{ 17{
18 struct msi_msg msg; 18 struct msi_msg msg;
19 u32 addr, data; 19 u32 addr, data;
20 int cpu = first_cpu(*cpu_mask); 20 int cpu = first_cpu(*cpu_mask);
21 unsigned int irq = idata->irq;
21 22
22 if (!cpu_online(cpu)) 23 if (!cpu_online(cpu))
23 return -1; 24 return -1;
@@ -38,7 +39,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
38 msg.data = data; 39 msg.data = data;
39 40
40 write_msi_msg(irq, &msg); 41 write_msi_msg(irq, &msg);
41 cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu)); 42 cpumask_copy(idata->affinity, cpumask_of(cpu));
42 43
43 return 0; 44 return 0;
44} 45}
@@ -55,7 +56,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
55 if (irq < 0) 56 if (irq < 0)
56 return irq; 57 return irq;
57 58
58 set_irq_msi(irq, desc); 59 irq_set_msi_desc(irq, desc);
59 cpus_and(mask, irq_to_domain(irq), cpu_online_map); 60 cpus_and(mask, irq_to_domain(irq), cpu_online_map);
60 dest_phys_id = cpu_physical_id(first_cpu(mask)); 61 dest_phys_id = cpu_physical_id(first_cpu(mask));
61 vector = irq_to_vector(irq); 62 vector = irq_to_vector(irq);
@@ -74,7 +75,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
74 MSI_DATA_VECTOR(vector); 75 MSI_DATA_VECTOR(vector);
75 76
76 write_msi_msg(irq, &msg); 77 write_msi_msg(irq, &msg);
77 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 78 irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
78 79
79 return 0; 80 return 0;
80} 81}
@@ -84,16 +85,16 @@ void ia64_teardown_msi_irq(unsigned int irq)
84 destroy_irq(irq); 85 destroy_irq(irq);
85} 86}
86 87
87static void ia64_ack_msi_irq(unsigned int irq) 88static void ia64_ack_msi_irq(struct irq_data *data)
88{ 89{
89 irq_complete_move(irq); 90 irq_complete_move(data->irq);
90 move_native_irq(irq); 91 irq_move_irq(data);
91 ia64_eoi(); 92 ia64_eoi();
92} 93}
93 94
94static int ia64_msi_retrigger_irq(unsigned int irq) 95static int ia64_msi_retrigger_irq(struct irq_data *data)
95{ 96{
96 unsigned int vector = irq_to_vector(irq); 97 unsigned int vector = irq_to_vector(data->irq);
97 ia64_resend_irq(vector); 98 ia64_resend_irq(vector);
98 99
99 return 1; 100 return 1;
@@ -103,14 +104,14 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
103 * Generic ops used on most IA64 platforms. 104 * Generic ops used on most IA64 platforms.
104 */ 105 */
105static struct irq_chip ia64_msi_chip = { 106static struct irq_chip ia64_msi_chip = {
106 .name = "PCI-MSI", 107 .name = "PCI-MSI",
107 .irq_mask = mask_msi_irq, 108 .irq_mask = mask_msi_irq,
108 .irq_unmask = unmask_msi_irq, 109 .irq_unmask = unmask_msi_irq,
109 .ack = ia64_ack_msi_irq, 110 .irq_ack = ia64_ack_msi_irq,
110#ifdef CONFIG_SMP 111#ifdef CONFIG_SMP
111 .set_affinity = ia64_set_msi_irq_affinity, 112 .irq_set_affinity = ia64_set_msi_irq_affinity,
112#endif 113#endif
113 .retrigger = ia64_msi_retrigger_irq, 114 .irq_retrigger = ia64_msi_retrigger_irq,
114}; 115};
115 116
116 117
@@ -132,8 +133,10 @@ void arch_teardown_msi_irq(unsigned int irq)
132 133
133#ifdef CONFIG_DMAR 134#ifdef CONFIG_DMAR
134#ifdef CONFIG_SMP 135#ifdef CONFIG_SMP
135static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 136static int dmar_msi_set_affinity(struct irq_data *data,
137 const struct cpumask *mask, bool force)
136{ 138{
139 unsigned int irq = data->irq;
137 struct irq_cfg *cfg = irq_cfg + irq; 140 struct irq_cfg *cfg = irq_cfg + irq;
138 struct msi_msg msg; 141 struct msi_msg msg;
139 int cpu = cpumask_first(mask); 142 int cpu = cpumask_first(mask);
@@ -152,7 +155,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
152 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); 155 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
153 156
154 dmar_msi_write(irq, &msg); 157 dmar_msi_write(irq, &msg);
155 cpumask_copy(irq_desc[irq].affinity, mask); 158 cpumask_copy(data->affinity, mask);
156 159
157 return 0; 160 return 0;
158} 161}
@@ -162,11 +165,11 @@ static struct irq_chip dmar_msi_type = {
162 .name = "DMAR_MSI", 165 .name = "DMAR_MSI",
163 .irq_unmask = dmar_msi_unmask, 166 .irq_unmask = dmar_msi_unmask,
164 .irq_mask = dmar_msi_mask, 167 .irq_mask = dmar_msi_mask,
165 .ack = ia64_ack_msi_irq, 168 .irq_ack = ia64_ack_msi_irq,
166#ifdef CONFIG_SMP 169#ifdef CONFIG_SMP
167 .set_affinity = dmar_msi_set_affinity, 170 .irq_set_affinity = dmar_msi_set_affinity,
168#endif 171#endif
169 .retrigger = ia64_msi_retrigger_irq, 172 .irq_retrigger = ia64_msi_retrigger_irq,
170}; 173};
171 174
172static int 175static int
@@ -203,8 +206,8 @@ int arch_setup_dmar_msi(unsigned int irq)
203 if (ret < 0) 206 if (ret < 0)
204 return ret; 207 return ret;
205 dmar_msi_write(irq, &msg); 208 dmar_msi_write(irq, &msg);
206 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 209 irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
207 "edge"); 210 "edge");
208 return 0; 211 return 0;
209} 212}
210#endif /* CONFIG_DMAR */ 213#endif /* CONFIG_DMAR */
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 5f637bbfcccd..30c644ea44c9 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -150,7 +150,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
150 * current = task running at the time of the overflow. 150 * current = task running at the time of the overflow.
151 * 151 *
152 * per-task mode: 152 * per-task mode:
153 * - this is ususally the task being monitored. 153 * - this is usually the task being monitored.
154 * Under certain conditions, it might be a different task 154 * Under certain conditions, it might be a different task
155 * 155 *
156 * system-wide: 156 * system-wide:
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 911cf9749700..5e2c72498c51 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -479,25 +479,7 @@ static __init int setup_nomca(char *s)
479} 479}
480early_param("nomca", setup_nomca); 480early_param("nomca", setup_nomca);
481 481
482/*
483 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
484 * is_kdump_kernel() to determine if we are booting after a panic. Hence
485 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
486 */
487#ifdef CONFIG_CRASH_DUMP 482#ifdef CONFIG_CRASH_DUMP
488/* elfcorehdr= specifies the location of elf core header
489 * stored by the crashed kernel.
490 */
491static int __init parse_elfcorehdr(char *arg)
492{
493 if (!arg)
494 return -EINVAL;
495
496 elfcorehdr_addr = memparse(arg, &arg);
497 return 0;
498}
499early_param("elfcorehdr", parse_elfcorehdr);
500
501int __init reserve_elfcorehdr(u64 *start, u64 *end) 483int __init reserve_elfcorehdr(u64 *start, u64 *end)
502{ 484{
503 u64 length; 485 u64 length;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d003b502a432..14ec641003da 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -677,7 +677,7 @@ extern void fixup_irqs(void);
677int migrate_platform_irqs(unsigned int cpu) 677int migrate_platform_irqs(unsigned int cpu)
678{ 678{
679 int new_cpei_cpu; 679 int new_cpei_cpu;
680 struct irq_desc *desc = NULL; 680 struct irq_data *data = NULL;
681 const struct cpumask *mask; 681 const struct cpumask *mask;
682 int retval = 0; 682 int retval = 0;
683 683
@@ -693,20 +693,20 @@ int migrate_platform_irqs(unsigned int cpu)
693 new_cpei_cpu = any_online_cpu(cpu_online_map); 693 new_cpei_cpu = any_online_cpu(cpu_online_map);
694 mask = cpumask_of(new_cpei_cpu); 694 mask = cpumask_of(new_cpei_cpu);
695 set_cpei_target_cpu(new_cpei_cpu); 695 set_cpei_target_cpu(new_cpei_cpu);
696 desc = irq_desc + ia64_cpe_irq; 696 data = irq_get_irq_data(ia64_cpe_irq);
697 /* 697 /*
698 * Switch for now, immediately, we need to do fake intr 698 * Switch for now, immediately, we need to do fake intr
699 * as other interrupts, but need to study CPEI behaviour with 699 * as other interrupts, but need to study CPEI behaviour with
700 * polling before making changes. 700 * polling before making changes.
701 */ 701 */
702 if (desc) { 702 if (data && data->chip) {
703 desc->chip->disable(ia64_cpe_irq); 703 data->chip->irq_disable(data);
704 desc->chip->set_affinity(ia64_cpe_irq, mask); 704 data->chip->irq_set_affinity(data, mask, false);
705 desc->chip->enable(ia64_cpe_irq); 705 data->chip->irq_enable(data);
706 printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); 706 printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
707 } 707 }
708 } 708 }
709 if (!desc) { 709 if (!data) {
710 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); 710 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
711 retval = -EBUSY; 711 retval = -EBUSY;
712 } 712 }
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 41c40f0e4796..04440cc09b40 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -188,19 +188,10 @@ timer_interrupt (int irq, void *dev_id)
188 188
189 new_itm += local_cpu_data->itm_delta; 189 new_itm += local_cpu_data->itm_delta;
190 190
191 if (smp_processor_id() == time_keeper_id) { 191 if (smp_processor_id() == time_keeper_id)
192 /* 192 xtime_update(1);
193 * Here we are in the timer irq handler. We have irqs locally 193
194 * disabled, but we don't know if the timer_bh is running on 194 local_cpu_data->itm_next = new_itm;
195 * another CPU. We need to avoid to SMP race by acquiring the
196 * xtime_lock.
197 */
198 write_seqlock(&xtime_lock);
199 do_timer(1);
200 local_cpu_data->itm_next = new_itm;
201 write_sequnlock(&xtime_lock);
202 } else
203 local_cpu_data->itm_next = new_itm;
204 195
205 if (time_after(new_itm, ia64_get_itc())) 196 if (time_after(new_itm, ia64_get_itc()))
206 break; 197 break;
@@ -220,7 +211,7 @@ skip_process_time_accounting:
220 * comfort, we increase the safety margin by 211 * comfort, we increase the safety margin by
221 * intentionally dropping the next tick(s). We do NOT 212 * intentionally dropping the next tick(s). We do NOT
222 * update itm.next because that would force us to call 213 * update itm.next because that would force us to call
223 * do_timer() which in turn would let our clock run 214 * xtime_update() which in turn would let our clock run
224 * too fast (with the potentially devastating effect 215 * too fast (with the potentially devastating effect
225 * of losing monotony of time). 216 * of losing monotony of time).
226 */ 217 */
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 0baa1bbb65fe..0e0e0cc9e392 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -43,7 +43,7 @@ int __ref arch_register_cpu(int num)
43{ 43{
44#ifdef CONFIG_ACPI 44#ifdef CONFIG_ACPI
45 /* 45 /*
46 * If CPEI can be re-targetted or if this is not 46 * If CPEI can be re-targeted or if this is not
47 * CPEI target, then it is hotpluggable 47 * CPEI target, then it is hotpluggable
48 */ 48 */
49 if (can_cpei_retarget() || !is_cpu_cpei_target(num)) 49 if (can_cpei_retarget() || !is_cpu_cpei_target(num))
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5a4d044dcb1c..787de4a77d82 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
198 198
199 /* Per-cpu data: */ 199 /* Per-cpu data: */
200 . = ALIGN(PERCPU_PAGE_SIZE); 200 . = ALIGN(PERCPU_PAGE_SIZE);
201 PERCPU_VADDR(PERCPU_ADDR, :percpu) 201 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
202 __phys_per_cpu_start = __per_cpu_load; 202 __phys_per_cpu_start = __per_cpu_load;
203 /* 203 /*
204 * ensure percpu data fits 204 * ensure percpu data fits