Diffstat (limited to 'arch/i386')

 -rw-r--r--  arch/i386/Kconfig                                  |  1
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c            | 21
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c         | 31
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c         |  6
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h         |  2
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  | 10
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c       |  1
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c       |  2
 -rw-r--r--  arch/i386/kernel/i8253.c                           |  2
 -rw-r--r--  arch/i386/kernel/io_apic.c                         |  4
 -rw-r--r--  arch/i386/mach-voyager/setup.c                     |  8
 -rw-r--r--  arch/i386/mach-voyager/voyager_cat.c               |  4
 -rw-r--r--  arch/i386/mach-voyager/voyager_smp.c               | 97
 -rw-r--r--  arch/i386/mach-voyager/voyager_thread.c            | 69
 -rw-r--r--  arch/i386/pci/fixup.c                              |  2
 -rw-r--r--  arch/i386/pci/i386.c                               |  4

16 files changed, 124 insertions, 140 deletions
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index c6f8d6856c4d..a9af760c7e5f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1057,6 +1057,7 @@ config PCI
 	bool "PCI support" if !X86_VISWS
 	depends on !X86_VOYAGER
 	default y if X86_VISWS
+	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
 	help
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 2b030d6ccbf7..a3df9c039bd4 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -590,20 +590,23 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
 static int enable_arbiter_disable(void)
 {
 	struct pci_dev *dev;
+	int status;
 	int reg;
 	u8 pci_cmd;
 
+	status = 1;
 	/* Find PLE133 host bridge */
 	reg = 0x78;
-	dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL);
+	dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
+			NULL);
 	/* Find CLE266 host bridge */
 	if (dev == NULL) {
 		reg = 0x76;
-		dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
+		dev = pci_get_device(PCI_VENDOR_ID_VIA,
+				PCI_DEVICE_ID_VIA_862X_0, NULL);
 		/* Find CN400 V-Link host bridge */
 		if (dev == NULL)
-			dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
-
+			dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
 	}
 	if (dev != NULL) {
 		/* Enable access to port 0x22 */
@@ -615,10 +618,11 @@ static int enable_arbiter_disable(void)
 			if (!(pci_cmd & 1<<7)) {
 				printk(KERN_ERR PFX
 					"Can't enable access to port 0x22.\n");
-				return 0;
+				status = 0;
 			}
 		}
-		return 1;
+		pci_dev_put(dev);
+		return status;
 	}
 	return 0;
 }
@@ -629,7 +633,7 @@ static int longhaul_setup_vt8235(void)
 	u8 pci_cmd;
 
 	/* Find VT8235 southbridge */
-	dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+	dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
 	if (dev != NULL) {
 		/* Set transition time to max */
 		pci_read_config_byte(dev, 0xec, &pci_cmd);
@@ -641,6 +645,7 @@ static int longhaul_setup_vt8235(void)
 		pci_read_config_byte(dev, 0xe5, &pci_cmd);
 		pci_cmd |= 1 << 7;
 		pci_write_config_byte(dev, 0xe5, pci_cmd);
+		pci_dev_put(dev);
 		return 1;
 	}
 	return 0;
@@ -678,7 +683,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 			sizeof(samuel2_eblcr));
 		break;
 	case 1 ... 15:
-		longhaul_version = TYPE_LONGHAUL_V2;
+		longhaul_version = TYPE_LONGHAUL_V1;
 		if (c->x86_mask < 8) {
 			cpu_model = CPU_SAMUEL2;
 			cpuname = "C3 'Samuel 2' [C5B]";
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 4786fedca6eb..4c76b511e194 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -27,7 +27,6 @@
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>	/* current / set_cpus_allowed() */
 
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -62,7 +61,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
 	if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
 		return -EINVAL;
 
-	rdmsr(MSR_IA32_THERM_STATUS, l, h);
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
 
 	if (l & 0x01)
 		dprintk("CPU#%d currently thermal throttled\n", cpu);
@@ -70,10 +69,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
 	if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
 		newstate = DC_38PT;
 
-	rdmsr(MSR_IA32_THERM_CONTROL, l, h);
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
 	if (newstate == DC_DISABLE) {
 		dprintk("CPU#%d disabling modulation\n", cpu);
-		wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
 	} else {
 		dprintk("CPU#%d setting duty cycle to %d%%\n",
 			cpu, ((125 * newstate) / 10));
@@ -84,7 +83,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
 		 */
 		l = (l & ~14);
 		l = l | (1<<4) | ((newstate & 0x7)<<1);
-		wrmsr(MSR_IA32_THERM_CONTROL, l, h);
+		wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
 	}
 
 	return 0;
@@ -111,7 +110,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 {
 	unsigned int newstate = DC_RESV;
 	struct cpufreq_freqs freqs;
-	cpumask_t cpus_allowed;
 	int i;
 
 	if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
@@ -132,17 +130,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	cpus_allowed = current->cpus_allowed;
-
-	for_each_cpu_mask(i, policy->cpus) {
-		cpumask_t this_cpu = cpumask_of_cpu(i);
-
-		set_cpus_allowed(current, this_cpu);
-		BUG_ON(smp_processor_id() != i);
-
+	for_each_cpu_mask(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
-	}
-	set_cpus_allowed(current, cpus_allowed);
 
 	/* notifiers */
 	for_each_cpu_mask(i, policy->cpus) {
@@ -256,17 +245,9 @@ static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
 
 static unsigned int cpufreq_p4_get(unsigned int cpu)
 {
-	cpumask_t cpus_allowed;
 	u32 l, h;
 
-	cpus_allowed = current->cpus_allowed;
-
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	BUG_ON(smp_processor_id() != cpu);
-
-	rdmsr(MSR_IA32_THERM_CONTROL, l, h);
-
-	set_cpus_allowed(current, cpus_allowed);
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
 
 	if (l & 0x10) {
 		l = l >> 1;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index fe3b67005ebb..7cf3d207b6b3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -661,7 +661,8 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
 	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
 	data->powernow_table = powernow_table;
-	print_basics(data);
+	if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+		print_basics(data);
 
 	for (j = 0; j < data->numps; j++)
 		if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
@@ -814,7 +815,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
 	/* fill in data */
 	data->numps = data->acpi_data.state_count;
-	print_basics(data);
+	if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
 
 	/* notify BIOS that we exist */
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 0fb2a3001ba5..95be5013c984 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -215,8 +215,10 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
+#endif
 
 #ifdef CONFIG_SMP
 static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index f43b987f952b..35489fd68852 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -720,6 +720,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 			cpu_set(j, set_mask);
 
 		set_cpus_allowed(current, set_mask);
+		preempt_disable();
 		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
 			dprintk("couldn't limit to CPUs in this domain\n");
 			retval = -EAGAIN;
@@ -727,6 +728,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 				/* We haven't started the transition yet. */
 				goto migrate_end;
 			}
+			preempt_enable();
 			break;
 		}
 
@@ -761,10 +763,13 @@ static int centrino_target (struct cpufreq_policy *policy,
 		}
 
 		wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+			preempt_enable();
 			break;
+		}
 
 		cpu_set(j, covered_cpus);
+		preempt_enable();
 	}
 
 	for_each_cpu_mask(k, online_policy_cpus) {
@@ -796,8 +801,11 @@ static int centrino_target (struct cpufreq_policy *policy,
 			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 		}
 	}
+	set_cpus_allowed(current, saved_mask);
+	return 0;
 
 migrate_end:
+	preempt_enable();
 	set_cpus_allowed(current, saved_mask);
 	return 0;
 }
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index d59277c00911..b1acc8ce3167 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -13,7 +13,6 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
-#include <linux/pci.h>
 #include <linux/slab.h>
 
 #include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index ff0d89806114..e1c509aa3054 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -17,10 +17,10 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
-#include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <asm/ist.h>
+#include <asm/io.h>
 
 #include "speedstep-lib.h"
 
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 10cef5ca8a5b..f8a3c4054c70 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -110,7 +110,7 @@ void __init setup_pit_timer(void)
 	 * Start pit with the boot cpu mask and make it global after the
 	 * IO_APIC has been initialized.
 	 */
-	pit_clockevent.cpumask = cpumask_of_cpu(0);
+	pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
 	pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
 	pit_clockevent.max_delta_ns =
 		clockevent_delta2ns(0x7FFF, &pit_clockevent);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index f23a17b3b8cf..1b623cda3a64 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2579,19 +2579,19 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 	if (irq < 0)
 		return irq;
 
-	set_irq_msi(irq, desc);
 	ret = msi_compose_msg(dev, irq, &msg);
 	if (ret < 0) {
 		destroy_irq(irq);
 		return ret;
 	}
 
+	set_irq_msi(irq, desc);
 	write_msi_msg(irq, &msg);
 
 	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
 				      "edge");
 
-	return irq;
+	return 0;
 }
 
 void arch_teardown_msi_irq(unsigned int irq)
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index cfa16c151c8f..447bb105cf58 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -40,10 +40,16 @@ void __init trap_init_hook(void)
 {
 }
 
-static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL};
+static struct irqaction irq0 = {
+	.handler = timer_interrupt,
+	.flags = IRQF_DISABLED | IRQF_NOBALANCING,
+	.mask = CPU_MASK_NONE,
+	.name = "timer"
+};
 
 void __init time_init_hook(void)
 {
+	irq0.mask = cpumask_of_cpu(safe_smp_processor_id());
 	setup_irq(0, &irq0);
 }
 
diff --git a/arch/i386/mach-voyager/voyager_cat.c b/arch/i386/mach-voyager/voyager_cat.c
index 943a9473b138..26a2d4c54b68 100644
--- a/arch/i386/mach-voyager/voyager_cat.c
+++ b/arch/i386/mach-voyager/voyager_cat.c
@@ -1111,7 +1111,7 @@ voyager_cat_do_common_interrupt(void)
 			printk(KERN_ERR "Voyager front panel switch turned off\n");
 			voyager_status.switch_off = 1;
 			voyager_status.request_from_kernel = 1;
-			up(&kvoyagerd_sem);
+			wake_up_process(voyager_thread);
 		}
 		/* Tell the hardware we're taking care of the
 		 * shutdown, otherwise it will power the box off
@@ -1157,7 +1157,7 @@ voyager_cat_do_common_interrupt(void)
 		outb(VOYAGER_CAT_END, CAT_CMD);
 		voyager_status.power_fail = 1;
 		voyager_status.request_from_kernel = 1;
-		up(&kvoyagerd_sem);
+		wake_up_process(voyager_thread);
 	}
 
 
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index b9ce33c0c202..1a5e448a29c7 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -536,15 +536,6 @@ do_boot_cpu(__u8 cpu)
 		& ~( voyager_extended_vic_processors
 		     & voyager_allowed_boot_processors);
 
-	/* For the 486, we can't use the 4Mb page table trick, so
-	 * must map a region of memory */
-#ifdef CONFIG_M486
-	int i;
-	unsigned long *page_table_copies = (unsigned long *)
-		__get_free_page(GFP_KERNEL);
-#endif
-	pgd_t orig_swapper_pg_dir0;
-
 	/* This is an area in head.S which was used to set up the
 	 * initial kernel stack. We need to alter this to give the
 	 * booting CPU a new stack (taken from its idle process) */
@@ -573,6 +564,8 @@ do_boot_cpu(__u8 cpu)
 	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
 
 	cpucount++;
+	alternatives_smp_switch(1);
+
 	idle = fork_idle(cpu);
 	if(IS_ERR(idle))
 		panic("failed fork for CPU%d", cpu);
@@ -587,24 +580,11 @@ do_boot_cpu(__u8 cpu)
 	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
 		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
 		hijack_source.idt.Offset, stack_start.esp));
-	/* set the original swapper_pg_dir[0] to map 0 to 4Mb transparently
-	 * (so that the booting CPU can find start_32 */
-	orig_swapper_pg_dir0 = swapper_pg_dir[0];
-#ifdef CONFIG_M486
-	if(page_table_copies == NULL)
-		panic("No free memory for 486 page tables\n");
-	for(i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++)
-		page_table_copies[i] = (i * PAGE_SIZE)
-			| _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-
-	((unsigned long *)swapper_pg_dir)[0] =
-		((virt_to_phys(page_table_copies)) & PAGE_MASK)
-		| _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-#else
-	((unsigned long *)swapper_pg_dir)[0] =
-		(virt_to_phys(pg0) & PAGE_MASK)
-		| _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-#endif
+
+	/* init lowmem identity mapping */
+	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+	flush_tlb_all();
 
 	if(quad_boot) {
 		printk("CPU %d: non extended Quad boot\n", cpu);
@@ -647,11 +627,7 @@ do_boot_cpu(__u8 cpu)
 		udelay(100);
 	}
 	/* reset the page table */
-	swapper_pg_dir[0] = orig_swapper_pg_dir0;
-	local_flush_tlb();
-#ifdef CONFIG_M486
-	free_page((unsigned long)page_table_copies);
-#endif
+	zap_low_mappings();
 
 	if (cpu_booted_map) {
 		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
@@ -1068,20 +1044,11 @@ smp_call_function_interrupt(void)
 	}
 }
 
-/* Call this function on all CPUs using the function_interrupt above
-    <func> The function to run. This must be fast and non-blocking.
-    <info> An arbitrary pointer to pass to the function.
-    <retry> If true, keep retrying until ready.
-    <wait> If true, wait until function has completed on other CPUs.
-  [RETURNS] 0 on success, else a negative status code. Does not return until
-   remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function (void (*func) (void *info), void *info, int retry,
-		   int wait)
+static int
+__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
+			  int wait, __u32 mask)
 {
 	struct call_data_struct data;
-	__u32 mask = cpus_addr(cpu_online_map)[0];
 
 	mask &= ~(1<<smp_processor_id());
 
@@ -1102,7 +1069,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
 	call_data = &data;
 	wmb();
 	/* Send a message to all other CPUs and wait for them to respond */
-	send_CPI_allbutself(VIC_CALL_FUNCTION_CPI);
+	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 
 	/* Wait for response */
 	while (data.started)
@@ -1116,8 +1083,48 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
 
 	return 0;
 }
+
+/* Call this function on all CPUs using the function_interrupt above
+    <func> The function to run. This must be fast and non-blocking.
+    <info> An arbitrary pointer to pass to the function.
+    <retry> If true, keep retrying until ready.
+    <wait> If true, wait until function has completed on other CPUs.
+  [RETURNS] 0 on success, else a negative status code. Does not return until
+   remote CPUs are nearly ready to execute <<func>> or are or have executed.
+*/
+int
+smp_call_function(void (*func) (void *info), void *info, int retry,
+		  int wait)
+{
+	__u32 mask = cpus_addr(cpu_online_map)[0];
+
+	return __smp_call_function_mask(func, info, retry, wait, mask);
+}
 EXPORT_SYMBOL(smp_call_function);
 
+/*
+ * smp_call_function_single - Run a function on another CPU
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Currently unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Retrurns 0 on success, else a negative status code.
+ *
+ * Does not return until the remote CPU is nearly ready to execute <func>
+ * or is or has executed.
+ */
+
+int
+smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			 int nonatomic, int wait)
+{
+	__u32 mask = 1 << cpu;
+
+	return __smp_call_function_mask(func, info, nonatomic, wait, mask);
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
 /* Sorry about the name. In an APIC based system, the APICs
  * themselves are programmed to send a timer interrupt. This is used
  * by linux to reschedule the processor. Voyager doesn't have this,
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c
index f39887359e8e..fdc1d926fb2a 100644
--- a/arch/i386/mach-voyager/voyager_thread.c
+++ b/arch/i386/mach-voyager/voyager_thread.c
@@ -24,33 +24,16 @@
 #include <linux/kmod.h>
 #include <linux/completion.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 #include <asm/desc.h>
 #include <asm/voyager.h>
 #include <asm/vic.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
 
-#define THREAD_NAME "kvoyagerd"
 
-/* external variables */
-int kvoyagerd_running = 0;
-DECLARE_MUTEX_LOCKED(kvoyagerd_sem);
-
-static int thread(void *);
-
-static __u8 set_timeout = 0;
-
-/* Start the machine monitor thread. Return 1 if OK, 0 if fail */
-static int __init
-voyager_thread_start(void)
-{
-	if(kernel_thread(thread, NULL, CLONE_KERNEL) < 0) {
-		/* This is serious, but not fatal */
-		printk(KERN_ERR "Voyager: Failed to create system monitor thread!!!\n");
-		return 1;
-	}
-	return 0;
-}
+struct task_struct *voyager_thread;
+static __u8 set_timeout;
 
 static int
 execute(const char *string)
@@ -110,31 +93,15 @@ check_continuing_condition(void)
 	}
 }
 
-static void
-wakeup(unsigned long unused)
-{
-	up(&kvoyagerd_sem);
-}
-
 static int
 thread(void *unused)
 {
-	struct timer_list wakeup_timer;
-
-	kvoyagerd_running = 1;
-
-	daemonize(THREAD_NAME);
-
-	set_timeout = 0;
-
-	init_timer(&wakeup_timer);
-
-	sigfillset(&current->blocked);
-
 	printk(KERN_NOTICE "Voyager starting monitor thread\n");
 
-	for(;;) {
-		down_interruptible(&kvoyagerd_sem);
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
+
 		VDEBUG(("Voyager Daemon awoken\n"));
 		if(voyager_status.request_from_kernel == 0) {
 			/* probably awoken from timeout */
@@ -143,20 +110,26 @@ thread(void *unused)
 			check_from_kernel();
 			voyager_status.request_from_kernel = 0;
 		}
-		if(set_timeout) {
-			del_timer(&wakeup_timer);
-			wakeup_timer.expires = HZ + jiffies;
-			wakeup_timer.function = wakeup;
-			add_timer(&wakeup_timer);
-		}
 	}
 }
 
+static int __init
+voyager_thread_start(void)
+{
+	voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
+	if (IS_ERR(voyager_thread)) {
+		printk(KERN_ERR "Voyager: Failed to create system monitor thread.\n");
+		return PTR_ERR(voyager_thread);
+	}
+	return 0;
+}
+
+
 static void __exit
 voyager_thread_stop(void)
 {
-	/* FIXME: do nothing at the moment */
+	kthread_stop(voyager_thread);
 }
 
 module_init(voyager_thread_start);
-//module_exit(voyager_thread_stop);
+module_exit(voyager_thread_stop);
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index 8053b17ab647..b62eafb997bc 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -354,7 +354,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
 		printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev));
 	}
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
 
 /*
  * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 43005f044424..bcd2f94b732c 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -246,8 +246,8 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
 			continue;
 		if (!r->start && r->end) {
 			printk(KERN_ERR "PCI: Device %s not available "
-				"because of resource collisions\n",
-				pci_name(dev));
+				"because of resource %d collisions\n",
+				pci_name(dev), idx);
 			return -EINVAL;
 		}
 		if (r->flags & IORESOURCE_IO)