Diffstat (limited to 'arch/ia64/kernel')
 arch/ia64/kernel/acpi.c     | 32 +++++++-------------------------
 arch/ia64/kernel/efi.c      |  7 +++++++
 arch/ia64/kernel/ftrace.c   |  4 +---
 arch/ia64/kernel/irq_ia64.c | 14 ++++----------
 arch/ia64/kernel/mca.c      | 10 ++--------
 arch/ia64/kernel/msi_ia64.c | 10 ++--------
 arch/ia64/kernel/perfmon.c  |  1 -
 arch/ia64/kernel/time.c     |  2 +-
 arch/ia64/kernel/uncached.c |  2 +-
 9 files changed, 25 insertions(+), 57 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 07d209c9507f..0d407b300762 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -54,10 +54,6 @@
 #include <asm/sal.h>
 #include <asm/cyclone.h>
 
-#define BAD_MADT_ENTRY(entry, end) (					\
-	(!entry) || (unsigned long)entry + sizeof(*entry) > end ||	\
-	((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
 #define PREFIX "ACPI: "
 
 unsigned int acpi_cpei_override;
@@ -803,14 +799,9 @@ int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
  * ACPI based hotplug CPU support
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-static
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
-	int pxm_id;
-	int nid;
-
-	pxm_id = acpi_get_pxm(handle);
 	/*
 	 * We don't have cpu-only-node hotadd. But if the system equips
 	 * SRAT table, pxm is already found and node is ready.
@@ -818,11 +809,10 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 	 * This code here is for the system which doesn't have full SRAT
 	 * table for possible cpus.
 	 */
-	nid = acpi_map_pxm_to_node(pxm_id);
 	node_cpuid[cpu].phys_id = physid;
-	node_cpuid[cpu].nid = nid;
+	node_cpuid[cpu].nid = acpi_get_node(handle);
 #endif
-	return (0);
+	return 0;
 }
 
 int additional_cpus __initdata = -1;
@@ -929,7 +919,7 @@ static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
 	union acpi_object *obj;
 	struct acpi_madt_io_sapic *iosapic;
 	unsigned int gsi_base;
-	int pxm, node;
+	int node;
 
 	/* Only care about objects w/ a method that returns the MADT */
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -956,17 +946,9 @@ static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth,
 
 	kfree(buffer.pointer);
 
-	/*
-	 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
-	 * us which node to associate this with.
-	 */
-	pxm = acpi_get_pxm(handle);
-	if (pxm < 0)
-		return AE_OK;
-
-	node = pxm_to_node(pxm);
-
-	if (node >= MAX_NUMNODES || !node_online(node) ||
+	/* OK, it's an IOSAPIC MADT entry; associate it with a node */
+	node = acpi_get_node(handle);
+	if (node == NUMA_NO_NODE || !node_online(node) ||
 	    cpumask_empty(cpumask_of_node(node)))
 		return AE_OK;
 
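
Note: the acpi.c change folds the old two-step proximity lookup (acpi_get_pxm() followed by acpi_map_pxm_to_node()/pxm_to_node()) into a single acpi_get_node() call, which returns NUMA_NO_NODE when the handle carries no _PXM information. A minimal sketch of the resulting pattern; my_choose_node() is a hypothetical helper, not part of this patch:

#include <linux/acpi.h>
#include <linux/nodemask.h>
#include <linux/numa.h>

/* Sketch only: map an ACPI handle to a usable online NUMA node. */
static int my_choose_node(acpi_handle handle)
{
	int node = acpi_get_node(handle);	/* NUMA_NO_NODE if no _PXM */

	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_online_node;	/* fall back to any online node */
	return node;
}
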
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index da5b462e6de6..741b99c1a0b1 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -477,6 +477,9 @@ efi_init (void)
 	char *cp, vendor[100] = "unknown";
 	int i;
 
+	set_bit(EFI_BOOT, &efi.flags);
+	set_bit(EFI_64BIT, &efi.flags);
+
 	/*
 	 * It's too early to be able to use the standard kernel command line
 	 * support...
@@ -529,6 +532,8 @@ efi_init (void)
 	       efi.systab->hdr.revision >> 16,
 	       efi.systab->hdr.revision & 0xffff, vendor);
 
+	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
 	palo_phys = EFI_INVALID_TABLE_ADDR;
 
 	if (efi_config_init(arch_tables) != 0)
@@ -657,6 +662,8 @@ efi_enter_virtual_mode (void)
 		return;
 	}
 
+	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
 	/*
 	 * Now that EFI is in virtual mode, we call the EFI functions more
 	 * efficiently:
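
Note: the efi.c hunks set the generic capability bits in efi.flags (EFI_BOOT, EFI_64BIT, EFI_SYSTEM_TABLES, EFI_RUNTIME_SERVICES) so that architecture-independent code can test them with efi_enabled() instead of ia64-specific checks. A hedged sketch of a consumer; my_read_next_variable() is a hypothetical function, not part of this patch:

#include <linux/efi.h>

static efi_status_t my_read_next_variable(unsigned long *name_size,
					   efi_char16_t *name,
					   efi_guid_t *vendor)
{
	/* Only touch runtime services once the arch code has flagged them. */
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return EFI_UNSUPPORTED;

	return efi.get_next_variable(name_size, name, vendor);
}
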
diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c
index 7fc8c961b1f7..3b0c2aa07857 100644
--- a/arch/ia64/kernel/ftrace.c
+++ b/arch/ia64/kernel/ftrace.c
@@ -198,9 +198,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 }
 
 /* run from kstop_machine */
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1034884b77da..0884f5ecbcc3 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -364,7 +364,6 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_move_irqaction = {
 	.handler = smp_irq_move_cleanup_interrupt,
-	.flags = IRQF_DISABLED,
 	.name = "irq_move"
 };
 
@@ -489,14 +488,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 		int irq = local_vector_to_irq(vector);
-		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else if (unlikely(IS_RESCHEDULE(vector))) {
 			scheduler_ipi();
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -549,13 +547,12 @@ void ia64_process_pending_intr(void)
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
 		int irq = local_vector_to_irq(vector);
-		struct irq_desc *desc = irq_to_desc(irq);
 
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else if (unlikely(IS_RESCHEDULE(vector))) {
-			kstat_incr_irqs_this_cpu(irq, desc);
+			kstat_incr_irq_this_cpu(irq);
 		} else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
@@ -602,7 +599,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 
 static struct irqaction ipi_irqaction = {
 	.handler = handle_IPI,
-	.flags = IRQF_DISABLED,
 	.name = "IPI"
 };
 
@@ -611,13 +607,11 @@ static struct irqaction ipi_irqaction = {
  */
 static struct irqaction resched_irqaction = {
 	.handler = dummy_handler,
-	.flags = IRQF_DISABLED,
 	.name = "resched"
 };
 
 static struct irqaction tlb_irqaction = {
 	.handler = dummy_handler,
-	.flags = IRQF_DISABLED,
 	.name = "tlb_flush"
 };
 
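
Note: two separate cleanups meet in irq_ia64.c. The IRQF_DISABLED initializers go away because the flag has long been a no-op (hardirq handlers always run with interrupts disabled), and kstat_incr_irq_this_cpu(irq) replaces the kstat_incr_irqs_this_cpu(irq, desc) pairing, so the caller no longer needs its own irq_to_desc() lookup. A sketch of the accounting call; my_account_ipi() is a hypothetical helper:

#include <linux/kernel_stat.h>

/* Sketch only: count an IPI-style vector handled outside generic_handle_irq(). */
static void my_account_ipi(unsigned int irq)
{
	kstat_incr_irq_this_cpu(irq);	/* per-CPU count, no irq_desc needed */
}
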
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index b8edfa75a83f..db7b36bb068b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -217,7 +217,7 @@ void ia64_mca_printk(const char *fmt, ...)
 	/* Copy the output into mlogbuf */
 	if (oops_in_progress) {
 		/* mlogbuf was abandoned, use printk directly instead. */
-		printk(temp_buf);
+		printk("%s", temp_buf);
 	} else {
 		spin_lock(&mlogbuf_wlock);
 		for (p = temp_buf; *p; p++) {
@@ -268,7 +268,7 @@ void ia64_mlogbuf_dump(void)
 		}
 		*p = '\0';
 		if (temp_buf[0])
-			printk(temp_buf);
+			printk("%s", temp_buf);
 		mlogbuf_start = index;
 
 		mlogbuf_timestamp = 0;
@@ -1772,38 +1772,32 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 
 static struct irqaction cmci_irqaction = {
 	.handler = ia64_mca_cmc_int_handler,
-	.flags = IRQF_DISABLED,
 	.name = "cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
 	.handler = ia64_mca_cmc_int_caller,
-	.flags = IRQF_DISABLED,
 	.name = "cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
 	.handler = ia64_mca_rendez_int_handler,
-	.flags = IRQF_DISABLED,
 	.name = "mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
 	.handler = ia64_mca_wakeup_int_handler,
-	.flags = IRQF_DISABLED,
 	.name = "mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
 	.handler = ia64_mca_cpe_int_handler,
-	.flags = IRQF_DISABLED,
 	.name = "cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
 	.handler = ia64_mca_cpe_int_caller,
-	.flags = IRQF_DISABLED,
 	.name = "cpe_poll"
 };
 #endif /* CONFIG_ACPI */
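
Note: the two printk() hunks in mca.c fix a format-string bug. temp_buf is variable text, so passing it as the format argument would make printk() interpret any '%' it happens to contain as a conversion specifier. The safe idiom, as a minimal sketch (my_log_text() is a hypothetical helper):

#include <linux/printk.h>

/* Sketch only: log a buffer of arbitrary text safely. */
static void my_log_text(const char *buf)
{
	printk("%s", buf);	/* '%' in buf is printed literally */
}
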
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index fb2f1e622877..c430f9198d1b 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -17,12 +17,9 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 {
 	struct msi_msg msg;
 	u32 addr, data;
-	int cpu = first_cpu(*cpu_mask);
+	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
 	unsigned int irq = idata->irq;
 
-	if (!cpu_online(cpu))
-		return -1;
-
 	if (irq_prepare_move(irq, cpu))
 		return -1;
 
@@ -139,10 +136,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
 	unsigned int irq = data->irq;
 	struct irq_cfg *cfg = irq_cfg + irq;
 	struct msi_msg msg;
-	int cpu = cpumask_first(mask);
-
-	if (!cpu_online(cpu))
-		return -1;
+	int cpu = cpumask_first_and(mask, cpu_online_mask);
 
 	if (irq_prepare_move(irq, cpu))
 		return -1;
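
Note: both msi_ia64.c hunks replace "take the first CPU of the mask, then verify it is online" with cpumask_first_and(mask, cpu_online_mask), which intersects the two masks in one step; when the intersection is empty the result is >= nr_cpu_ids rather than a stale offline CPU. A hedged sketch of the selection with an explicit emptiness check (my_pick_target_cpu() is hypothetical, and the check is stricter than what this patch itself does):

#include <linux/cpumask.h>
#include <linux/errno.h>

static int my_pick_target_cpu(const struct cpumask *mask)
{
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)	/* no requested CPU is online */
		return -EINVAL;
	return cpu;
}
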
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index cb592773c78b..d841c4bd6864 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6387,7 +6387,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 static struct irqaction perfmon_irqaction = {
 	.handler = pfm_interrupt_handler,
-	.flags = IRQF_DISABLED,
 	.name = "perfmon"
 };
 
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index fbaac1afb844..71c52bc7c28d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -380,7 +380,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs)
 
 static struct irqaction timer_irqaction = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED | IRQF_IRQPOLL,
+	.flags = IRQF_IRQPOLL,
 	.name = "timer"
 };
 
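
Note: as in the other irqaction changes above, dropping IRQF_DISABLED from timer_irqaction is safe because the flag is ignored: handlers run with local interrupts disabled in any case, so only IRQF_IRQPOLL still carries meaning here. A sketch of an equivalent registration through request_irq(); MY_TIMER_IRQ and my_timer_handler() are hypothetical:

#include <linux/interrupt.h>

#define MY_TIMER_IRQ	42	/* hypothetical IRQ number */

static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	/* Runs with local interrupts disabled, no flag required. */
	return IRQ_HANDLED;
}

static int __init my_timer_setup(void)
{
	return request_irq(MY_TIMER_IRQ, my_timer_handler,
			   IRQF_IRQPOLL, "my_timer", NULL);
}
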
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf83a735..20e8a9b21d75 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_exact_node(nid,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
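
Note: the uncached.c fix passes the raw __GFP_THISNODE bit, which is what this allocation actually wants: stay on the requested node and do not fall back. (If I recall correctly, the old GFP_THISNODE was a composite that also implied __GFP_NORETRY and __GFP_NOWARN on NUMA builds, so the two spellings were not interchangeable.) A minimal sketch of a node-strict allocation using the common alloc_pages_node() helper; my_alloc_granule() is hypothetical:

#include <linux/gfp.h>

/* Sketch only: zeroed, order-N allocation that must come from node 'nid'. */
static struct page *my_alloc_granule(int nid, unsigned int order)
{
	return alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				order);
}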