Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/acpi.c              | 33
-rw-r--r--   arch/ia64/kernel/head.S              |  4
-rw-r--r--   arch/ia64/kernel/ia64_ksyms.c        |  2
-rw-r--r--   arch/ia64/kernel/iosapic.c           |  6
-rw-r--r--   arch/ia64/kernel/irq.c               |  4
-rw-r--r--   arch/ia64/kernel/irq_ia64.c          | 10
-rw-r--r--   arch/ia64/kernel/mca.c               | 11
-rw-r--r--   arch/ia64/kernel/mca_asm.S           |  2
-rw-r--r--   arch/ia64/kernel/perfmon.c           | 15
-rw-r--r--   arch/ia64/kernel/relocate_kernel.S   |  2
-rw-r--r--   arch/ia64/kernel/setup.c             | 27
-rw-r--r--   arch/ia64/kernel/vmlinux.lds.S       | 11

12 files changed, 57 insertions, 70 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
 
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] =
+			hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
+
 	return 0;
 }
 
-
-
 int __init acpi_boot_init(void)
 {
 
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
 	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
+#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-	if (available_cpus == 0) {
-		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] =
-			hard_smp_processor_id();
-		available_cpus = 1;	/* We've got at least one of these, no? */
-	}
-	smp_boot_data.cpu_count = available_cpus;
-
-	smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
 			node_cpuid[i++].phys_id =
 				smp_boot_data.cpu_phys_id[cpu];
 	}
-# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
 	build_cpu_to_node_map();
 #endif
-	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-	       total_cpus);
 	return 0;
 }
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter (35 cyc)
 	;;
 	ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
 	ldf8 f8=[r8]
 	;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
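The head.S hunks above and the mca_asm.S, relocate_kernel.S and setup.c hunks below, together with this export, all track a single rename of the per-cpu variable cpu_info to ia64_cpu_info. The exported symbol changes with it because, in this kernel generation, DEFINE_PER_CPU() mangles the variable name with a per_cpu__ prefix. A minimal sketch of that pairing, assuming only that mangling scheme (the includes shown are illustrative, not part of the patch):

	#include <linux/percpu.h>
	#include <linux/module.h>
	#include <asm/processor.h>	/* struct cpuinfo_ia64 */

	/* emits the symbol per_cpu__ia64_cpu_info under this era's percpu scheme */
	DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
	EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
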
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
 		goto unlock_iosapic_lock;
 	}
 
-	spin_lock(&irq_desc[irq].lock);
+	raw_spin_lock(&irq_desc[irq].lock);
 	dest = get_target_cpu(gsi, irq);
 	dmode = choose_dmode();
 	err = register_intr(gsi, irq, dmode, polarity, trigger);
 	if (err < 0) {
-		spin_unlock(&irq_desc[irq].lock);
+		raw_spin_unlock(&irq_desc[irq].lock);
 		irq = err;
 		goto unlock_iosapic_lock;
 	}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
 
-	spin_unlock(&irq_desc[irq].lock);
+	raw_spin_unlock(&irq_desc[irq].lock);
 unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 	return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS)
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..d4093a173a3e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
-#define IA64_IRQ_MOVE_VECTOR	IA64_DEF_FIRST_DEVICE_VECTOR
 
 static enum vector_domain_type {
 	VECTOR_DOMAIN_NONE,
@@ -345,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 		desc = irq_desc + irq;
 		cfg = irq_cfg + irq;
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
@@ -358,7 +357,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		spin_unlock_irqrestore(&vector_lock, flags);
 		cfg->move_cleanup_count--;
 unlock:
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 	}
 	return IRQ_HANDLED;
 }
@@ -659,11 +658,8 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
-	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
-		BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
-		IA64_FIRST_DEVICE_VECTOR++;
+	if (vector_domain_type != VECTOR_DOMAIN_NONE)
 		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
-	}
 #endif
 #endif
 #ifdef CONFIG_PERFMON
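The iosapic.c, irq.c and irq_ia64.c hunks above convert every access to an irq_desc lock from the spin_* helpers to the raw_spin_* helpers, tracking the genirq change that turned irq_desc[].lock into a raw_spinlock_t. A minimal sketch of the resulting pattern, assuming that conversion; inspect_irq() is a hypothetical caller, not a function from the patch:

	static void inspect_irq(unsigned int irq)
	{
		struct irq_desc *desc = irq_desc + irq;
		unsigned long flags;

		/* desc->lock is a raw_spinlock_t here, so the raw_spin_* API is required */
		raw_spin_lock_irqsave(&desc->lock, flags);
		/* ... examine or update the descriptor under the lock ... */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
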
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a99488..32f2639e9b0a 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
 }
 
 static void
-finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
+finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
 		unsigned long *nat)
 {
+	const pal_min_state_area_t *ms = sos->pal_min_state;
 	const u64 *bank;
 
 	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
@@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
 		regs->cr_iip = ms->pmsa_xip;
 		regs->cr_ipsr = ms->pmsa_xpsr;
 		regs->cr_ifs = ms->pmsa_xfs;
+
+		sos->iip = ms->pmsa_iip;
+		sos->ipsr = ms->pmsa_ipsr;
+		sos->ifs = ms->pmsa_ifs;
 	}
 	regs->pr = ms->pmsa_pr;
 	regs->b0 = ms->pmsa_br0;
@@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	memcpy(old_regs, regs, sizeof(*regs));
 	old_regs->loadrs = loadrs;
 	old_unat = old_regs->ar_unat;
-	finish_pt_regs(old_regs, ms, &old_unat);
+	finish_pt_regs(old_regs, sos, &old_unat);
 
 	/* Next stack a struct switch_stack. mca_asm.S built a partial
 	 * switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1150,7 +1155,7 @@ no_mod:
 	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
 		smp_processor_id(), type, msg);
 	old_unat = regs->ar_unat;
-	finish_pt_regs(regs, ms, &old_unat);
+	finish_pt_regs(regs, sos, &old_unat);
 	return previous_current;
 }
 
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 599b233bef75..5246285a95fb 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2200,7 +2200,7 @@ pfm_alloc_file(pfm_context_t *ctx)
 {
 	struct file *file;
 	struct inode *inode;
-	struct dentry *dentry;
+	struct path path;
 	char name[32];
 	struct qstr this;
 
@@ -2225,18 +2225,19 @@ pfm_alloc_file(pfm_context_t *ctx)
 	/*
 	 * allocate a new dcache entry
 	 */
-	dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
-	if (!dentry) {
+	path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+	if (!path.dentry) {
 		iput(inode);
 		return ERR_PTR(-ENOMEM);
 	}
+	path.mnt = mntget(pfmfs_mnt);
 
-	dentry->d_op = &pfmfs_dentry_operations;
-	d_add(dentry, inode);
+	path.dentry->d_op = &pfmfs_dentry_operations;
+	d_add(path.dentry, inode);
 
-	file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops);
+	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
 	if (!file) {
-		dput(dentry);
+		path_put(&path);
 		return ERR_PTR(-ENFILE);
 	}
 
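The perfmon.c hunk above moves pfm_alloc_file() from the (vfsmount, dentry) form of alloc_file() to the struct path form. A minimal sketch of that calling convention, assuming the VFS API of this kernel generation; parent, name, mnt, inode and my_fops are illustrative stand-ins rather than names from the patch:

	struct path path;
	struct file *filp;

	path.dentry = d_alloc(parent, &name);	/* new dcache entry */
	if (!path.dentry)
		return ERR_PTR(-ENOMEM);
	path.mnt = mntget(mnt);			/* the path holds its own mount reference */
	d_add(path.dentry, inode);

	filp = alloc_file(&path, FMODE_READ, &my_fops);
	if (!filp)
		path_put(&path);		/* drops both the dentry and the mount reference */

Since path_put() releases both references at once, the error path changes from dput(dentry) to path_put(&path) once the mount reference has been taken.
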
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 
 	// purge all TC entries
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
 	early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
 	acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#  ifdef CONFIG_ACPI_HOTPLUG_CPU
 	prefill_possible_map();
-#endif
+#  endif
 	per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
 		32 : cpus_weight(early_cpu_possible_map)),
 		additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */
 
+#ifdef CONFIG_SMP
+	smp_build_cpu_map();
+#endif
 	find_memory();
 
 	/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 }
 
 /*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
-/*
  * Do the following calculations:
  *
  * 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
 	 * depends on the data returned by identify_cpu(). We break the dependency by
 	 * accessing cpu_data() through the canonical per-CPU address.
 	 */
-	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
 	identify_cpu(cpu_info);
 
 #ifdef CONFIG_MCKINLEY
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
   }
 #endif
 
+#ifdef CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
-#ifdef CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-  __cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)