author     Ingo Molnar <mingo@elte.hu>  2008-08-11 06:57:01 -0400
committer  Ingo Molnar <mingo@elte.hu>  2008-08-11 06:57:01 -0400
commit     6de9c70882ecdee63a652d493bf2353963bd4c22 (patch)
tree       9d219e705492331c97f5f7dccce3b0b1a29251bf /arch/x86/kernel
parent     d406d21d90dce2e66c7eb4a44605aac947fe55fb (diff)
parent     796aadeb1b2db9b5d463946766c5bbfd7717158c (diff)
Merge branch 'linus' into x86/cleanups
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/cstate.c                    |   3
-rw-r--r--  arch/x86/kernel/amd_iommu.c                      |  17
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c                 |   4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig              |   4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c       |  10
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c           |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c        | 125
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h        |   3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c |  12
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c      |   3
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c            |   3
-rw-r--r--  arch/x86/kernel/genapic_64.c                     |   1
-rw-r--r--  arch/x86/kernel/head_32.S                        |   8
-rw-r--r--  arch/x86/kernel/ldt.c                            |   6
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c               |  39
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c               |   2
-rw-r--r--  arch/x86/kernel/microcode.c                      |  17
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c                 |  73
-rw-r--r--  arch/x86/kernel/pci-dma.c                        | 157
-rw-r--r--  arch/x86/kernel/pci-gart_64.c                    |  14
-rw-r--r--  arch/x86/kernel/pci-nommu.c                      |  14
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c                 |   2
-rw-r--r--  arch/x86/kernel/reboot.c                         |  11
-rw-r--r--  arch/x86/kernel/relocate_kernel_32.S             | 174
-rw-r--r--  arch/x86/kernel/setup.c                          |  13
-rw-r--r--  arch/x86/kernel/setup_percpu.c                   |  21
26 files changed, 370 insertions(+), 368 deletions(-)
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 9220cf46aa10..c2502eb9aa83 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
73 struct cpuinfo_x86 *c = &cpu_data(cpu); 73 struct cpuinfo_x86 *c = &cpu_data(cpu);
74 74
75 cpumask_t saved_mask; 75 cpumask_t saved_mask;
76 cpumask_of_cpu_ptr(new_mask, cpu);
77 int retval; 76 int retval;
78 unsigned int eax, ebx, ecx, edx; 77 unsigned int eax, ebx, ecx, edx;
79 unsigned int edx_part; 78 unsigned int edx_part;
@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
92 91
93 /* Make sure we are running on right CPU */ 92 /* Make sure we are running on right CPU */
94 saved_mask = current->cpus_allowed; 93 saved_mask = current->cpus_allowed;
95 retval = set_cpus_allowed_ptr(current, new_mask); 94 retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
96 if (retval) 95 if (retval)
97 return -1; 96 return -1;
98 97
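The cstate.c hunk above is the first instance of a cleanup repeated throughout this merge: the temporary cpumask_of_cpu_ptr*() helpers are dropped and set_cpus_allowed_ptr() takes &cpumask_of_cpu(cpu) directly. The resulting pin/work/restore pattern, reduced to a standalone sketch (function name illustrative, <linux/sched.h> and <linux/cpumask.h> assumed):

static int run_on_cpu(unsigned int cpu)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int ret;

	/* pin the calling task to the target CPU */
	ret = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (ret)
		return ret;

	/* ... per-CPU work (cpuid, rdmsr/wrmsr, ...) happens here ... */

	/* restore the original affinity mask */
	set_cpus_allowed_ptr(current, &saved_mask);
	return 0;
}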
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c25210e6ac88..22d7d050905d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -29,9 +29,6 @@
29 29
30#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) 30#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
31 31
32#define to_pages(addr, size) \
33 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
34
35#define EXIT_LOOP_COUNT 10000000 32#define EXIT_LOOP_COUNT 10000000
36 33
37static DEFINE_RWLOCK(amd_iommu_devtable_lock); 34static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
185 u64 address, size_t size) 182 u64 address, size_t size)
186{ 183{
187 int s = 0; 184 int s = 0;
188 unsigned pages = to_pages(address, size); 185 unsigned pages = iommu_num_pages(address, size);
189 186
190 address &= PAGE_MASK; 187 address &= PAGE_MASK;
191 188
@@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
557 if (iommu->exclusion_start && 554 if (iommu->exclusion_start &&
558 iommu->exclusion_start < dma_dom->aperture_size) { 555 iommu->exclusion_start < dma_dom->aperture_size) {
559 unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; 556 unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
560 int pages = to_pages(iommu->exclusion_start, 557 int pages = iommu_num_pages(iommu->exclusion_start,
561 iommu->exclusion_length); 558 iommu->exclusion_length);
562 dma_ops_reserve_addresses(dma_dom, startpage, pages); 559 dma_ops_reserve_addresses(dma_dom, startpage, pages);
563 } 560 }
564 561
@@ -667,7 +664,7 @@ static int get_device_resources(struct device *dev,
667 _bdf = calc_devid(pcidev->bus->number, pcidev->devfn); 664 _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
668 665
669 /* device not translated by any IOMMU in the system? */ 666 /* device not translated by any IOMMU in the system? */
670 if (_bdf >= amd_iommu_last_bdf) { 667 if (_bdf > amd_iommu_last_bdf) {
671 *iommu = NULL; 668 *iommu = NULL;
672 *domain = NULL; 669 *domain = NULL;
673 *bdf = 0xffff; 670 *bdf = 0xffff;
@@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev,
767 unsigned int pages; 764 unsigned int pages;
768 int i; 765 int i;
769 766
770 pages = to_pages(paddr, size); 767 pages = iommu_num_pages(paddr, size);
771 paddr &= PAGE_MASK; 768 paddr &= PAGE_MASK;
772 769
773 address = dma_ops_alloc_addresses(dev, dma_dom, pages); 770 address = dma_ops_alloc_addresses(dev, dma_dom, pages);
@@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu,
802 if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) 799 if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
803 return; 800 return;
804 801
805 pages = to_pages(dma_addr, size); 802 pages = iommu_num_pages(dma_addr, size);
806 dma_addr &= PAGE_MASK; 803 dma_addr &= PAGE_MASK;
807 start = dma_addr; 804 start = dma_addr;
808 805
@@ -1085,7 +1082,7 @@ void prealloc_protection_domains(void)
1085 1082
1086 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1083 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1087 devid = (dev->bus->number << 8) | dev->devfn; 1084 devid = (dev->bus->number << 8) | dev->devfn;
1088 if (devid >= amd_iommu_last_bdf) 1085 if (devid > amd_iommu_last_bdf)
1089 continue; 1086 continue;
1090 devid = amd_iommu_alias_table[devid]; 1087 devid = amd_iommu_alias_table[devid];
1091 if (domain_for_device(devid)) 1088 if (domain_for_device(devid))
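The to_pages() macro removed above is replaced by iommu_num_pages(), added later in this merge as a shared helper in pci-dma.c. The computation is just a round-up of (offset within page + length); a small user-space check of the same arithmetic, assuming a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* pages spanned by the byte range [addr, addr + len) */
static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long size = ((addr & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK;

	return size >> PAGE_SHIFT;
}

int main(void)
{
	/* 0x10 bytes into a page, 0x2000 bytes long: spans 3 pages */
	printf("%lu\n", num_pages(0x1010, 0x2000));
	return 0;
}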
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c9d8ff2eb130..d9a9da597e79 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -732,7 +732,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
732 set_device_exclusion_range(m->devid, m); 732 set_device_exclusion_range(m->devid, m);
733 break; 733 break;
734 case ACPI_IVMD_TYPE_ALL: 734 case ACPI_IVMD_TYPE_ALL:
735 for (i = 0; i < amd_iommu_last_bdf; ++i) 735 for (i = 0; i <= amd_iommu_last_bdf; ++i)
736 set_device_exclusion_range(i, m); 736 set_device_exclusion_range(i, m);
737 break; 737 break;
738 case ACPI_IVMD_TYPE_RANGE: 738 case ACPI_IVMD_TYPE_RANGE:
@@ -934,7 +934,7 @@ int __init amd_iommu_init(void)
934 /* 934 /*
935 * let all alias entries point to itself 935 * let all alias entries point to itself
936 */ 936 */
937 for (i = 0; i < amd_iommu_last_bdf; ++i) 937 for (i = 0; i <= amd_iommu_last_bdf; ++i)
938 amd_iommu_alias_table[i] = i; 938 amd_iommu_alias_table[i] = i;
939 939
940 /* 940 /*
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index cb7a5715596d..efae3b22a0ff 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -235,9 +235,9 @@ config X86_LONGHAUL
235 If in doubt, say N. 235 If in doubt, say N.
236 236
237config X86_E_POWERSAVER 237config X86_E_POWERSAVER
238 tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)" 238 tristate "VIA C7 Enhanced PowerSaver"
239 select CPU_FREQ_TABLE 239 select CPU_FREQ_TABLE
240 depends on X86_32 && EXPERIMENTAL 240 depends on X86_32
241 help 241 help
242 This adds the CPUFreq driver for VIA C7 processors. 242 This adds the CPUFreq driver for VIA C7 processors.
243 243
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ff2fff56f0a8..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
200static void drv_write(struct drv_cmd *cmd) 200static void drv_write(struct drv_cmd *cmd)
201{ 201{
202 cpumask_t saved_mask = current->cpus_allowed; 202 cpumask_t saved_mask = current->cpus_allowed;
203 cpumask_of_cpu_ptr_declare(cpu_mask);
204 unsigned int i; 203 unsigned int i;
205 204
206 for_each_cpu_mask_nr(i, cmd->mask) { 205 for_each_cpu_mask_nr(i, cmd->mask) {
207 cpumask_of_cpu_ptr_next(cpu_mask, i); 206 set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
208 set_cpus_allowed_ptr(current, cpu_mask);
209 do_drv_write(cmd); 207 do_drv_write(cmd);
210 } 208 }
211 209
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
269 } aperf_cur, mperf_cur; 267 } aperf_cur, mperf_cur;
270 268
271 cpumask_t saved_mask; 269 cpumask_t saved_mask;
272 cpumask_of_cpu_ptr(cpu_mask, cpu);
273 unsigned int perf_percent; 270 unsigned int perf_percent;
274 unsigned int retval; 271 unsigned int retval;
275 272
276 saved_mask = current->cpus_allowed; 273 saved_mask = current->cpus_allowed;
277 set_cpus_allowed_ptr(current, cpu_mask); 274 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
278 if (get_cpu() != cpu) { 275 if (get_cpu() != cpu) {
279 /* We were not able to run on requested processor */ 276 /* We were not able to run on requested processor */
280 put_cpu(); 277 put_cpu();
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
340 337
341static unsigned int get_cur_freq_on_cpu(unsigned int cpu) 338static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
342{ 339{
343 cpumask_of_cpu_ptr(cpu_mask, cpu);
344 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); 340 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
345 unsigned int freq; 341 unsigned int freq;
346 unsigned int cached_freq; 342 unsigned int cached_freq;
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
353 } 349 }
354 350
355 cached_freq = data->freq_table[data->acpi_data->state].frequency; 351 cached_freq = data->freq_table[data->acpi_data->state].frequency;
356 freq = extract_freq(get_cur_val(cpu_mask), data); 352 freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
357 if (freq != cached_freq) { 353 if (freq != cached_freq) {
358 /* 354 /*
359 * The dreaded BIOS frequency change behind our back. 355 * The dreaded BIOS frequency change behind our back.
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 94619c22f563..e4a4bf870e94 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -44,7 +44,7 @@ struct s_elan_multiplier {
44 * It is important that the frequencies 44 * It is important that the frequencies
45 * are listed in ascending order here! 45 * are listed in ascending order here!
46 */ 46 */
47struct s_elan_multiplier elan_multiplier[] = { 47static struct s_elan_multiplier elan_multiplier[] = {
48 {1000, 0x02, 0x18}, 48 {1000, 0x02, 0x18},
49 {2000, 0x02, 0x10}, 49 {2000, 0x02, 0x10},
50 {4000, 0x02, 0x08}, 50 {4000, 0x02, 0x08},
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 53c7b6936973..4e7271999a74 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -66,7 +66,6 @@ static u32 find_freq_from_fid(u32 fid)
66 return 800 + (fid * 100); 66 return 800 + (fid * 100);
67} 67}
68 68
69
70/* Return a frequency in KHz, given an input fid */ 69/* Return a frequency in KHz, given an input fid */
71static u32 find_khz_freq_from_fid(u32 fid) 70static u32 find_khz_freq_from_fid(u32 fid)
72{ 71{
@@ -78,7 +77,6 @@ static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 p
78 return data[pstate].frequency; 77 return data[pstate].frequency;
79} 78}
80 79
81
82/* Return the vco fid for an input fid 80/* Return the vco fid for an input fid
83 * 81 *
84 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids 82 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -166,7 +164,6 @@ static void fidvid_msr_init(void)
166 wrmsr(MSR_FIDVID_CTL, lo, hi); 164 wrmsr(MSR_FIDVID_CTL, lo, hi);
167} 165}
168 166
169
170/* write the new fid value along with the other control fields to the msr */ 167/* write the new fid value along with the other control fields to the msr */
171static int write_new_fid(struct powernow_k8_data *data, u32 fid) 168static int write_new_fid(struct powernow_k8_data *data, u32 fid)
172{ 169{
@@ -479,12 +476,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
479static int check_supported_cpu(unsigned int cpu) 476static int check_supported_cpu(unsigned int cpu)
480{ 477{
481 cpumask_t oldmask; 478 cpumask_t oldmask;
482 cpumask_of_cpu_ptr(cpu_mask, cpu);
483 u32 eax, ebx, ecx, edx; 479 u32 eax, ebx, ecx, edx;
484 unsigned int rc = 0; 480 unsigned int rc = 0;
485 481
486 oldmask = current->cpus_allowed; 482 oldmask = current->cpus_allowed;
487 set_cpus_allowed_ptr(current, cpu_mask); 483 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
488 484
489 if (smp_processor_id() != cpu) { 485 if (smp_processor_id() != cpu) {
490 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); 486 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -741,44 +737,63 @@ static int find_psb_table(struct powernow_k8_data *data)
741#ifdef CONFIG_X86_POWERNOW_K8_ACPI 737#ifdef CONFIG_X86_POWERNOW_K8_ACPI
742static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) 738static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
743{ 739{
744 if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) 740 if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE))
745 return; 741 return;
746 742
747 data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; 743 data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK;
748 data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; 744 data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK;
749 data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; 745 data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
750 data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; 746 data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK;
751 data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); 747 data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK);
752 data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; 748 data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK;
749}
750
751
752static struct acpi_processor_performance *acpi_perf_data;
753static int preregister_valid;
754
755static int powernow_k8_cpu_preinit_acpi(void)
756{
757 acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
758 if (!acpi_perf_data)
759 return -ENODEV;
760
761 if (acpi_processor_preregister_performance(acpi_perf_data))
762 return -ENODEV;
763 else
764 preregister_valid = 1;
765 return 0;
753} 766}
754 767
755static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) 768static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
756{ 769{
757 struct cpufreq_frequency_table *powernow_table; 770 struct cpufreq_frequency_table *powernow_table;
758 int ret_val; 771 int ret_val;
772 int cpu = 0;
759 773
760 if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { 774 data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
775 if (acpi_processor_register_performance(data->acpi_data, data->cpu)) {
761 dprintk("register performance failed: bad ACPI data\n"); 776 dprintk("register performance failed: bad ACPI data\n");
762 return -EIO; 777 return -EIO;
763 } 778 }
764 779
765 /* verify the data contained in the ACPI structures */ 780 /* verify the data contained in the ACPI structures */
766 if (data->acpi_data.state_count <= 1) { 781 if (data->acpi_data->state_count <= 1) {
767 dprintk("No ACPI P-States\n"); 782 dprintk("No ACPI P-States\n");
768 goto err_out; 783 goto err_out;
769 } 784 }
770 785
771 if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || 786 if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
772 (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { 787 (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
773 dprintk("Invalid control/status registers (%x - %x)\n", 788 dprintk("Invalid control/status registers (%x - %x)\n",
774 data->acpi_data.control_register.space_id, 789 data->acpi_data->control_register.space_id,
775 data->acpi_data.status_register.space_id); 790 data->acpi_data->status_register.space_id);
776 goto err_out; 791 goto err_out;
777 } 792 }
778 793
779 /* fill in data->powernow_table */ 794 /* fill in data->powernow_table */
780 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) 795 powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
781 * (data->acpi_data.state_count + 1)), GFP_KERNEL); 796 * (data->acpi_data->state_count + 1)), GFP_KERNEL);
782 if (!powernow_table) { 797 if (!powernow_table) {
783 dprintk("powernow_table memory alloc failure\n"); 798 dprintk("powernow_table memory alloc failure\n");
784 goto err_out; 799 goto err_out;
@@ -791,12 +806,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
791 if (ret_val) 806 if (ret_val)
792 goto err_out_mem; 807 goto err_out_mem;
793 808
794 powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; 809 powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END;
795 powernow_table[data->acpi_data.state_count].index = 0; 810 powernow_table[data->acpi_data->state_count].index = 0;
796 data->powernow_table = powernow_table; 811 data->powernow_table = powernow_table;
797 812
798 /* fill in data */ 813 /* fill in data */
799 data->numps = data->acpi_data.state_count; 814 data->numps = data->acpi_data->state_count;
800 if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) 815 if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
801 print_basics(data); 816 print_basics(data);
802 powernow_k8_acpi_pst_values(data, 0); 817 powernow_k8_acpi_pst_values(data, 0);
@@ -804,16 +819,31 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
804 /* notify BIOS that we exist */ 819 /* notify BIOS that we exist */
805 acpi_processor_notify_smm(THIS_MODULE); 820 acpi_processor_notify_smm(THIS_MODULE);
806 821
822 /* determine affinity, from ACPI if available */
823 if (preregister_valid) {
824 if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) ||
825 (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY))
826 data->starting_core_affinity = data->acpi_data->shared_cpu_map;
827 else
828 data->starting_core_affinity = cpumask_of_cpu(data->cpu);
829 } else {
830 /* best guess from family if not */
831 if (cpu_family == CPU_HW_PSTATE)
832 data->starting_core_affinity = cpumask_of_cpu(data->cpu);
833 else
834 data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu);
835 }
836
807 return 0; 837 return 0;
808 838
809err_out_mem: 839err_out_mem:
810 kfree(powernow_table); 840 kfree(powernow_table);
811 841
812err_out: 842err_out:
813 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 843 acpi_processor_unregister_performance(data->acpi_data, data->cpu);
814 844
815 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ 845 /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
816 data->acpi_data.state_count = 0; 846 data->acpi_data->state_count = 0;
817 847
818 return -ENODEV; 848 return -ENODEV;
819} 849}
@@ -825,10 +855,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
825 rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); 855 rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
826 data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; 856 data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
827 857
828 for (i = 0; i < data->acpi_data.state_count; i++) { 858 for (i = 0; i < data->acpi_data->state_count; i++) {
829 u32 index; 859 u32 index;
830 860
831 index = data->acpi_data.states[i].control & HW_PSTATE_MASK; 861 index = data->acpi_data->states[i].control & HW_PSTATE_MASK;
832 if (index > data->max_hw_pstate) { 862 if (index > data->max_hw_pstate) {
833 printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); 863 printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
834 printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); 864 printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
@@ -844,7 +874,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
844 874
845 powernow_table[i].index = index; 875 powernow_table[i].index = index;
846 876
847 powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; 877 powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000;
848 } 878 }
849 return 0; 879 return 0;
850} 880}
@@ -853,16 +883,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
853{ 883{
854 int i; 884 int i;
855 int cntlofreq = 0; 885 int cntlofreq = 0;
856 for (i = 0; i < data->acpi_data.state_count; i++) { 886 for (i = 0; i < data->acpi_data->state_count; i++) {
857 u32 fid; 887 u32 fid;
858 u32 vid; 888 u32 vid;
859 889
860 if (data->exttype) { 890 if (data->exttype) {
861 fid = data->acpi_data.states[i].status & EXT_FID_MASK; 891 fid = data->acpi_data->states[i].status & EXT_FID_MASK;
862 vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; 892 vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK;
863 } else { 893 } else {
864 fid = data->acpi_data.states[i].control & FID_MASK; 894 fid = data->acpi_data->states[i].control & FID_MASK;
865 vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; 895 vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK;
866 } 896 }
867 897
868 dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); 898 dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
@@ -903,10 +933,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
903 cntlofreq = i; 933 cntlofreq = i;
904 } 934 }
905 935
906 if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { 936 if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) {
907 printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", 937 printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
908 powernow_table[i].frequency, 938 powernow_table[i].frequency,
909 (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); 939 (unsigned int) (data->acpi_data->states[i].core_frequency * 1000));
910 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; 940 powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
911 continue; 941 continue;
912 } 942 }
@@ -916,11 +946,12 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
916 946
917static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) 947static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
918{ 948{
919 if (data->acpi_data.state_count) 949 if (data->acpi_data->state_count)
920 acpi_processor_unregister_performance(&data->acpi_data, data->cpu); 950 acpi_processor_unregister_performance(data->acpi_data, data->cpu);
921} 951}
922 952
923#else 953#else
954static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; }
924static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } 955static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; }
925static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } 956static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; }
926static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } 957static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; }
@@ -1017,7 +1048,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
1017static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) 1048static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
1018{ 1049{
1019 cpumask_t oldmask; 1050 cpumask_t oldmask;
1020 cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
1021 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); 1051 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1022 u32 checkfid; 1052 u32 checkfid;
1023 u32 checkvid; 1053 u32 checkvid;
@@ -1032,7 +1062,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
1032 1062
1033 /* only run on specific CPU from here on */ 1063 /* only run on specific CPU from here on */
1034 oldmask = current->cpus_allowed; 1064 oldmask = current->cpus_allowed;
1035 set_cpus_allowed_ptr(current, cpu_mask); 1065 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
1036 1066
1037 if (smp_processor_id() != pol->cpu) { 1067 if (smp_processor_id() != pol->cpu) {
1038 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1068 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1106,8 +1136,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
1106static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) 1136static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1107{ 1137{
1108 struct powernow_k8_data *data; 1138 struct powernow_k8_data *data;
1109 cpumask_t oldmask; 1139 cpumask_t oldmask = CPU_MASK_ALL;
1110 cpumask_of_cpu_ptr_declare(newmask);
1111 int rc; 1140 int rc;
1112 1141
1113 if (!cpu_online(pol->cpu)) 1142 if (!cpu_online(pol->cpu))
@@ -1159,8 +1188,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1159 1188
1160 /* only run on specific CPU from here on */ 1189 /* only run on specific CPU from here on */
1161 oldmask = current->cpus_allowed; 1190 oldmask = current->cpus_allowed;
1162 cpumask_of_cpu_ptr_next(newmask, pol->cpu); 1191 set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
1163 set_cpus_allowed_ptr(current, newmask);
1164 1192
1165 if (smp_processor_id() != pol->cpu) { 1193 if (smp_processor_id() != pol->cpu) {
1166 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); 1194 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1181,10 +1209,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
1181 /* run on any CPU again */ 1209 /* run on any CPU again */
1182 set_cpus_allowed_ptr(current, &oldmask); 1210 set_cpus_allowed_ptr(current, &oldmask);
1183 1211
1184 if (cpu_family == CPU_HW_PSTATE) 1212 pol->cpus = data->starting_core_affinity;
1185 pol->cpus = *newmask;
1186 else
1187 pol->cpus = per_cpu(cpu_core_map, pol->cpu);
1188 data->available_cores = &(pol->cpus); 1213 data->available_cores = &(pol->cpus);
1189 1214
1190 /* Take a crude guess here. 1215 /* Take a crude guess here.
@@ -1248,7 +1273,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
1248{ 1273{
1249 struct powernow_k8_data *data; 1274 struct powernow_k8_data *data;
1250 cpumask_t oldmask = current->cpus_allowed; 1275 cpumask_t oldmask = current->cpus_allowed;
1251 cpumask_of_cpu_ptr(newmask, cpu);
1252 unsigned int khz = 0; 1276 unsigned int khz = 0;
1253 unsigned int first; 1277 unsigned int first;
1254 1278
@@ -1258,7 +1282,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
1258 if (!data) 1282 if (!data)
1259 return -EINVAL; 1283 return -EINVAL;
1260 1284
1261 set_cpus_allowed_ptr(current, newmask); 1285 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
1262 if (smp_processor_id() != cpu) { 1286 if (smp_processor_id() != cpu) {
1263 printk(KERN_ERR PFX 1287 printk(KERN_ERR PFX
1264 "limiting to CPU %d failed in powernowk8_get\n", cpu); 1288 "limiting to CPU %d failed in powernowk8_get\n", cpu);
@@ -1308,6 +1332,7 @@ static int __cpuinit powernowk8_init(void)
1308 } 1332 }
1309 1333
1310 if (supported_cpus == num_online_cpus()) { 1334 if (supported_cpus == num_online_cpus()) {
1335 powernow_k8_cpu_preinit_acpi();
1311 printk(KERN_INFO PFX "Found %d %s " 1336 printk(KERN_INFO PFX "Found %d %s "
1312 "processors (%d cpu cores) (" VERSION ")\n", 1337 "processors (%d cpu cores) (" VERSION ")\n",
1313 num_online_nodes(), 1338 num_online_nodes(),
@@ -1324,6 +1349,10 @@ static void __exit powernowk8_exit(void)
1324 dprintk("exit\n"); 1349 dprintk("exit\n");
1325 1350
1326 cpufreq_unregister_driver(&cpufreq_amd64_driver); 1351 cpufreq_unregister_driver(&cpufreq_amd64_driver);
1352
1353#ifdef CONFIG_X86_POWERNOW_K8_ACPI
1354 free_percpu(acpi_perf_data);
1355#endif
1327} 1356}
1328 1357
1329MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); 1358MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>");
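The bulk of the powernow-k8 change above converts the embedded struct acpi_processor_performance into a pointer into a per-CPU allocation, so that ACPI _PSD preregistration can report which cores share a frequency domain (used above to pick the starting core affinity). The allocation lifecycle, reduced to a sketch with illustrative function names:

static struct acpi_processor_performance *perf_data;	/* per-CPU array */

static int example_preinit(void)
{
	perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!perf_data)
		return -ENODEV;

	/* preregister so shared_cpu_map/shared_type get filled in by ACPI */
	if (acpi_processor_preregister_performance(perf_data))
		return -ENODEV;
	return 0;
}

static void example_init_one(struct powernow_k8_data *data, int cpu)
{
	/* each policy's driver data points at its own CPU's slot */
	data->acpi_data = percpu_ptr(perf_data, cpu);
}

static void example_exit(void)
{
	free_percpu(perf_data);
}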
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index ab48cfed4d96..a62612cd4be8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -33,12 +33,13 @@ struct powernow_k8_data {
33#ifdef CONFIG_X86_POWERNOW_K8_ACPI 33#ifdef CONFIG_X86_POWERNOW_K8_ACPI
34 /* the acpi table needs to be kept. it's only available if ACPI was 34 /* the acpi table needs to be kept. it's only available if ACPI was
35 * used to determine valid frequency/vid/fid states */ 35 * used to determine valid frequency/vid/fid states */
36 struct acpi_processor_performance acpi_data; 36 struct acpi_processor_performance *acpi_data;
37#endif 37#endif
38 /* we need to keep track of associated cores, but let cpufreq 38 /* we need to keep track of associated cores, but let cpufreq
39 * handle hotplug events - so just point at cpufreq pol->cpus 39 * handle hotplug events - so just point at cpufreq pol->cpus
40 * structure */ 40 * structure */
41 cpumask_t *available_cores; 41 cpumask_t *available_cores;
42 cpumask_t starting_core_affinity;
42}; 43};
43 44
44 45
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index ca2ac13b7af2..15e13c01cc36 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
324 unsigned l, h; 324 unsigned l, h;
325 unsigned clock_freq; 325 unsigned clock_freq;
326 cpumask_t saved_mask; 326 cpumask_t saved_mask;
327 cpumask_of_cpu_ptr(new_mask, cpu);
328 327
329 saved_mask = current->cpus_allowed; 328 saved_mask = current->cpus_allowed;
330 set_cpus_allowed_ptr(current, new_mask); 329 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
331 if (smp_processor_id() != cpu) 330 if (smp_processor_id() != cpu)
332 return 0; 331 return 0;
333 332
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
585 * Best effort undo.. 584 * Best effort undo..
586 */ 585 */
587 586
588 if (!cpus_empty(*covered_cpus)) { 587 if (!cpus_empty(*covered_cpus))
589 cpumask_of_cpu_ptr_declare(new_mask);
590
591 for_each_cpu_mask_nr(j, *covered_cpus) { 588 for_each_cpu_mask_nr(j, *covered_cpus) {
592 cpumask_of_cpu_ptr_next(new_mask, j); 589 set_cpus_allowed_ptr(current,
593 set_cpus_allowed_ptr(current, new_mask); 590 &cpumask_of_cpu(j));
594 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); 591 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
595 } 592 }
596 }
597 593
598 tmp = freqs.new; 594 tmp = freqs.new;
599 freqs.new = freqs.old; 595 freqs.new = freqs.old;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 2f3728dc24f6..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
244 244
245static unsigned int speedstep_get(unsigned int cpu) 245static unsigned int speedstep_get(unsigned int cpu)
246{ 246{
247 cpumask_of_cpu_ptr(newmask, cpu); 247 return _speedstep_get(&cpumask_of_cpu(cpu));
248 return _speedstep_get(newmask);
249} 248}
250 249
251/** 250/**
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 650d40f7912b..6b0a10b002f1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
516 unsigned long j; 516 unsigned long j;
517 int retval; 517 int retval;
518 cpumask_t oldmask; 518 cpumask_t oldmask;
519 cpumask_of_cpu_ptr(newmask, cpu);
520 519
521 if (num_cache_leaves == 0) 520 if (num_cache_leaves == 0)
522 return -ENOENT; 521 return -ENOENT;
@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
527 return -ENOMEM; 526 return -ENOMEM;
528 527
529 oldmask = current->cpus_allowed; 528 oldmask = current->cpus_allowed;
530 retval = set_cpus_allowed_ptr(current, newmask); 529 retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
531 if (retval) 530 if (retval)
532 goto out; 531 goto out;
533 532
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 1fa8be5bd217..eaff0bbb1444 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -99,3 +99,4 @@ int is_uv_system(void)
99{ 99{
100 return uv_system_type != UV_NONE; 100 return uv_system_type != UV_NONE;
101} 101}
102EXPORT_SYMBOL_GPL(is_uv_system);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index f67e93441caf..a7010c3a377a 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -456,9 +456,6 @@ is386: movl $2,%ecx # set MP
4561: 4561:
457#endif /* CONFIG_SMP */ 457#endif /* CONFIG_SMP */
458 jmp *(initial_code) 458 jmp *(initial_code)
459.align 4
460ENTRY(initial_code)
461 .long i386_start_kernel
462 459
463/* 460/*
464 * We depend on ET to be correct. This checks for 287/387. 461 * We depend on ET to be correct. This checks for 287/387.
@@ -601,6 +598,11 @@ ignore_int:
601#endif 598#endif
602 iret 599 iret
603 600
601.section .cpuinit.data,"wa"
602.align 4
603ENTRY(initial_code)
604 .long i386_start_kernel
605
604.section .text 606.section .text
605/* 607/*
606 * Real beginning of normal "text" segment 608 * Real beginning of normal "text" segment
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 3fee2aa50f3f..b68e21f06f4f 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
62 62
63 if (reload) { 63 if (reload) {
64#ifdef CONFIG_SMP 64#ifdef CONFIG_SMP
65 cpumask_of_cpu_ptr_declare(mask);
66
67 preempt_disable(); 65 preempt_disable();
68 load_LDT(pc); 66 load_LDT(pc);
69 cpumask_of_cpu_ptr_next(mask, smp_processor_id()); 67 if (!cpus_equal(current->mm->cpu_vm_mask,
70 if (!cpus_equal(current->mm->cpu_vm_mask, *mask)) 68 cpumask_of_cpu(smp_processor_id())))
71 smp_call_function(flush_ldt, current->mm, 1); 69 smp_call_function(flush_ldt, current->mm, 1);
72 preempt_enable(); 70 preempt_enable();
73#else 71#else
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 8864230d55af..9fe478d98406 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -22,6 +22,7 @@
22#include <asm/cpufeature.h> 22#include <asm/cpufeature.h>
23#include <asm/desc.h> 23#include <asm/desc.h>
24#include <asm/system.h> 24#include <asm/system.h>
25#include <asm/cacheflush.h>
25 26
26#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE))) 27#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
27static u32 kexec_pgd[1024] PAGE_ALIGNED; 28static u32 kexec_pgd[1024] PAGE_ALIGNED;
@@ -85,10 +86,12 @@ static void load_segments(void)
85 * reboot code buffer to allow us to avoid allocations 86 * reboot code buffer to allow us to avoid allocations
86 * later. 87 * later.
87 * 88 *
88 * Currently nothing. 89 * Make control page executable.
89 */ 90 */
90int machine_kexec_prepare(struct kimage *image) 91int machine_kexec_prepare(struct kimage *image)
91{ 92{
93 if (nx_enabled)
94 set_pages_x(image->control_code_page, 1);
92 return 0; 95 return 0;
93} 96}
94 97
@@ -98,27 +101,48 @@ int machine_kexec_prepare(struct kimage *image)
98 */ 101 */
99void machine_kexec_cleanup(struct kimage *image) 102void machine_kexec_cleanup(struct kimage *image)
100{ 103{
104 if (nx_enabled)
105 set_pages_nx(image->control_code_page, 1);
101} 106}
102 107
103/* 108/*
104 * Do not allocate memory (or fail in any way) in machine_kexec(). 109 * Do not allocate memory (or fail in any way) in machine_kexec().
105 * We are past the point of no return, committed to rebooting now. 110 * We are past the point of no return, committed to rebooting now.
106 */ 111 */
107NORET_TYPE void machine_kexec(struct kimage *image) 112void machine_kexec(struct kimage *image)
108{ 113{
109 unsigned long page_list[PAGES_NR]; 114 unsigned long page_list[PAGES_NR];
110 void *control_page; 115 void *control_page;
116 asmlinkage unsigned long
117 (*relocate_kernel_ptr)(unsigned long indirection_page,
118 unsigned long control_page,
119 unsigned long start_address,
120 unsigned int has_pae,
121 unsigned int preserve_context);
111 122
112 tracer_disable(); 123 tracer_disable();
113 124
114 /* Interrupts aren't acceptable while we reboot */ 125 /* Interrupts aren't acceptable while we reboot */
115 local_irq_disable(); 126 local_irq_disable();
116 127
128 if (image->preserve_context) {
129#ifdef CONFIG_X86_IO_APIC
130 /* We need to put APICs in legacy mode so that we can
131 * get timer interrupts in second kernel. kexec/kdump
132 * paths already have calls to disable_IO_APIC() in
133 * one form or other. kexec jump path also need
134 * one.
135 */
136 disable_IO_APIC();
137#endif
138 }
139
117 control_page = page_address(image->control_code_page); 140 control_page = page_address(image->control_code_page);
118 memcpy(control_page, relocate_kernel, PAGE_SIZE); 141 memcpy(control_page, relocate_kernel, PAGE_SIZE/2);
119 142
143 relocate_kernel_ptr = control_page;
120 page_list[PA_CONTROL_PAGE] = __pa(control_page); 144 page_list[PA_CONTROL_PAGE] = __pa(control_page);
121 page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; 145 page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
122 page_list[PA_PGD] = __pa(kexec_pgd); 146 page_list[PA_PGD] = __pa(kexec_pgd);
123 page_list[VA_PGD] = (unsigned long)kexec_pgd; 147 page_list[VA_PGD] = (unsigned long)kexec_pgd;
124#ifdef CONFIG_X86_PAE 148#ifdef CONFIG_X86_PAE
@@ -131,6 +155,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
131 page_list[VA_PTE_0] = (unsigned long)kexec_pte0; 155 page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
132 page_list[PA_PTE_1] = __pa(kexec_pte1); 156 page_list[PA_PTE_1] = __pa(kexec_pte1);
133 page_list[VA_PTE_1] = (unsigned long)kexec_pte1; 157 page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
158 page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
134 159
135 /* The segment registers are funny things, they have both a 160 /* The segment registers are funny things, they have both a
136 * visible and an invisible part. Whenever the visible part is 161 * visible and an invisible part. Whenever the visible part is
@@ -149,8 +174,10 @@ NORET_TYPE void machine_kexec(struct kimage *image)
149 set_idt(phys_to_virt(0),0); 174 set_idt(phys_to_virt(0),0);
150 175
151 /* now call it */ 176 /* now call it */
152 relocate_kernel((unsigned long)image->head, (unsigned long)page_list, 177 image->start = relocate_kernel_ptr((unsigned long)image->head,
153 image->start, cpu_has_pae); 178 (unsigned long)page_list,
179 image->start, cpu_has_pae,
180 image->preserve_context);
154} 181}
155 182
156void arch_crash_save_vmcoreinfo(void) 183void arch_crash_save_vmcoreinfo(void)
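With kexec jump, relocate_kernel is no longer a one-way trip: only the first half of the control page holds code (hence the PAGE_SIZE/2 copy, the second half is scratch space), the call goes through a pointer into that copy rather than the original symbol, and the return value becomes the entry point to resume at. The call site, reduced to a sketch (helper name illustrative; the pointer type is the one declared in the hunk above):

static unsigned long call_relocated_code(void *control_page,
					 struct kimage *image,
					 unsigned long *page_list)
{
	asmlinkage unsigned long (*reloc)(unsigned long indirection_page,
					  unsigned long control_page,
					  unsigned long start_address,
					  unsigned int has_pae,
					  unsigned int preserve_context);

	/* call the copy placed in the control page, not the kernel symbol */
	reloc = control_page;
	return reloc((unsigned long)image->head, (unsigned long)page_list,
		     image->start, cpu_has_pae, image->preserve_context);
}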
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 9dd9262693a3..c43caa3a91f3 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -181,7 +181,7 @@ void machine_kexec_cleanup(struct kimage *image)
181 * Do not allocate memory (or fail in any way) in machine_kexec(). 181 * Do not allocate memory (or fail in any way) in machine_kexec().
182 * We are past the point of no return, committed to rebooting now. 182 * We are past the point of no return, committed to rebooting now.
183 */ 183 */
184NORET_TYPE void machine_kexec(struct kimage *image) 184void machine_kexec(struct kimage *image)
185{ 185{
186 unsigned long page_list[PAGES_NR]; 186 unsigned long page_list[PAGES_NR];
187 void *control_page; 187 void *control_page;
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 6994c751590e..652fa5c38ebe 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
388 void *new_mc = NULL; 388 void *new_mc = NULL;
389 int cpu; 389 int cpu;
390 cpumask_t old; 390 cpumask_t old;
391 cpumask_of_cpu_ptr_declare(newmask);
392 391
393 old = current->cpus_allowed; 392 old = current->cpus_allowed;
394 393
@@ -405,8 +404,7 @@ static int do_microcode_update (void)
405 404
406 if (!uci->valid) 405 if (!uci->valid)
407 continue; 406 continue;
408 cpumask_of_cpu_ptr_next(newmask, cpu); 407 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
409 set_cpus_allowed_ptr(current, newmask);
410 error = get_maching_microcode(new_mc, cpu); 408 error = get_maching_microcode(new_mc, cpu);
411 if (error < 0) 409 if (error < 0)
412 goto out; 410 goto out;
@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
576 struct cpuinfo_x86 *c = &cpu_data(cpu); 574 struct cpuinfo_x86 *c = &cpu_data(cpu);
577 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 575 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
578 cpumask_t old; 576 cpumask_t old;
579 cpumask_of_cpu_ptr(newmask, cpu);
580 unsigned int val[2]; 577 unsigned int val[2];
581 int err = 0; 578 int err = 0;
582 579
@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
585 return 0; 582 return 0;
586 583
587 old = current->cpus_allowed; 584 old = current->cpus_allowed;
588 set_cpus_allowed_ptr(current, newmask); 585 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
589 586
590 /* Check if the microcode we have in memory matches the CPU */ 587 /* Check if the microcode we have in memory matches the CPU */
591 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || 588 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -623,12 +620,11 @@ static int apply_microcode_check_cpu(int cpu)
623static void microcode_init_cpu(int cpu, int resume) 620static void microcode_init_cpu(int cpu, int resume)
624{ 621{
625 cpumask_t old; 622 cpumask_t old;
626 cpumask_of_cpu_ptr(newmask, cpu);
627 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 623 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
628 624
629 old = current->cpus_allowed; 625 old = current->cpus_allowed;
630 626
631 set_cpus_allowed_ptr(current, newmask); 627 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
632 mutex_lock(&microcode_mutex); 628 mutex_lock(&microcode_mutex);
633 collect_cpu_info(cpu); 629 collect_cpu_info(cpu);
634 if (uci->valid && system_state == SYSTEM_RUNNING && !resume) 630 if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
661 if (end == buf) 657 if (end == buf)
662 return -EINVAL; 658 return -EINVAL;
663 if (val == 1) { 659 if (val == 1) {
664 cpumask_t old; 660 cpumask_t old = current->cpus_allowed;
665 cpumask_of_cpu_ptr(newmask, cpu);
666
667 old = current->cpus_allowed;
668 661
669 get_online_cpus(); 662 get_online_cpus();
670 set_cpus_allowed_ptr(current, newmask); 663 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
671 664
672 mutex_lock(&microcode_mutex); 665 mutex_lock(&microcode_mutex);
673 if (uci->valid) 666 if (uci->valid)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4f..b67a4b1d4eae 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -37,6 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/scatterlist.h> 38#include <linux/scatterlist.h>
39#include <linux/iommu-helper.h> 39#include <linux/iommu-helper.h>
40
40#include <asm/iommu.h> 41#include <asm/iommu.h>
41#include <asm/calgary.h> 42#include <asm/calgary.h>
42#include <asm/tce.h> 43#include <asm/tce.h>
@@ -413,22 +414,6 @@ static void calgary_unmap_sg(struct device *dev,
413 } 414 }
414} 415}
415 416
416static int calgary_nontranslate_map_sg(struct device* dev,
417 struct scatterlist *sg, int nelems, int direction)
418{
419 struct scatterlist *s;
420 int i;
421
422 for_each_sg(sg, s, nelems, i) {
423 struct page *p = sg_page(s);
424
425 BUG_ON(!p);
426 s->dma_address = virt_to_bus(sg_virt(s));
427 s->dma_length = s->length;
428 }
429 return nelems;
430}
431
432static int calgary_map_sg(struct device *dev, struct scatterlist *sg, 417static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
433 int nelems, int direction) 418 int nelems, int direction)
434{ 419{
@@ -439,9 +424,6 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
439 unsigned long entry; 424 unsigned long entry;
440 int i; 425 int i;
441 426
442 if (!translation_enabled(tbl))
443 return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
444
445 for_each_sg(sg, s, nelems, i) { 427 for_each_sg(sg, s, nelems, i) {
446 BUG_ON(!sg_page(s)); 428 BUG_ON(!sg_page(s));
447 429
@@ -477,7 +459,6 @@ error:
477static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr, 459static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
478 size_t size, int direction) 460 size_t size, int direction)
479{ 461{
480 dma_addr_t dma_handle = bad_dma_address;
481 void *vaddr = phys_to_virt(paddr); 462 void *vaddr = phys_to_virt(paddr);
482 unsigned long uaddr; 463 unsigned long uaddr;
483 unsigned int npages; 464 unsigned int npages;
@@ -486,12 +467,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
486 uaddr = (unsigned long)vaddr; 467 uaddr = (unsigned long)vaddr;
487 npages = num_dma_pages(uaddr, size); 468 npages = num_dma_pages(uaddr, size);
488 469
489 if (translation_enabled(tbl)) 470 return iommu_alloc(dev, tbl, vaddr, npages, direction);
490 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
491 else
492 dma_handle = virt_to_bus(vaddr);
493
494 return dma_handle;
495} 471}
496 472
497static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle, 473static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
@@ -500,9 +476,6 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
500 struct iommu_table *tbl = find_iommu_table(dev); 476 struct iommu_table *tbl = find_iommu_table(dev);
501 unsigned int npages; 477 unsigned int npages;
502 478
503 if (!translation_enabled(tbl))
504 return;
505
506 npages = num_dma_pages(dma_handle, size); 479 npages = num_dma_pages(dma_handle, size);
507 iommu_free(tbl, dma_handle, npages); 480 iommu_free(tbl, dma_handle, npages);
508} 481}
@@ -525,18 +498,12 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
525 goto error; 498 goto error;
526 memset(ret, 0, size); 499 memset(ret, 0, size);
527 500
528 if (translation_enabled(tbl)) { 501 /* set up tces to cover the allocated range */
529 /* set up tces to cover the allocated range */ 502 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
530 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); 503 if (mapping == bad_dma_address)
531 if (mapping == bad_dma_address) 504 goto free;
532 goto free; 505 *dma_handle = mapping;
533
534 *dma_handle = mapping;
535 } else /* non translated slot */
536 *dma_handle = virt_to_bus(ret);
537
538 return ret; 506 return ret;
539
540free: 507free:
541 free_pages((unsigned long)ret, get_order(size)); 508 free_pages((unsigned long)ret, get_order(size));
542 ret = NULL; 509 ret = NULL;
@@ -544,7 +511,7 @@ error:
544 return ret; 511 return ret;
545} 512}
546 513
547static const struct dma_mapping_ops calgary_dma_ops = { 514static struct dma_mapping_ops calgary_dma_ops = {
548 .alloc_coherent = calgary_alloc_coherent, 515 .alloc_coherent = calgary_alloc_coherent,
549 .map_single = calgary_map_single, 516 .map_single = calgary_map_single,
550 .unmap_single = calgary_unmap_single, 517 .unmap_single = calgary_unmap_single,
@@ -1241,6 +1208,16 @@ static int __init calgary_init(void)
1241 goto error; 1208 goto error;
1242 } while (1); 1209 } while (1);
1243 1210
1211 dev = NULL;
1212 for_each_pci_dev(dev) {
1213 struct iommu_table *tbl;
1214
1215 tbl = find_iommu_table(&dev->dev);
1216
1217 if (translation_enabled(tbl))
1218 dev->dev.archdata.dma_ops = &calgary_dma_ops;
1219 }
1220
1244 return ret; 1221 return ret;
1245 1222
1246error: 1223error:
@@ -1262,6 +1239,7 @@ error:
1262 calgary_disable_translation(dev); 1239 calgary_disable_translation(dev);
1263 calgary_free_bus(dev); 1240 calgary_free_bus(dev);
1264 pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */ 1241 pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
1242 dev->dev.archdata.dma_ops = NULL;
1265 } while (1); 1243 } while (1);
1266 1244
1267 return ret; 1245 return ret;
@@ -1503,6 +1481,10 @@ void __init detect_calgary(void)
1503 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, " 1481 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d, "
1504 "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size, 1482 "CONFIG_IOMMU_DEBUG is %s.\n", specified_table_size,
1505 debugging ? "enabled" : "disabled"); 1483 debugging ? "enabled" : "disabled");
1484
1485 /* swiotlb for devices that aren't behind the Calgary. */
1486 if (max_pfn > MAX_DMA32_PFN)
1487 swiotlb = 1;
1506 } 1488 }
1507 return; 1489 return;
1508 1490
@@ -1519,7 +1501,7 @@ int __init calgary_iommu_init(void)
1519{ 1501{
1520 int ret; 1502 int ret;
1521 1503
1522 if (no_iommu || swiotlb) 1504 if (no_iommu || (swiotlb && !calgary_detected))
1523 return -ENODEV; 1505 return -ENODEV;
1524 1506
1525 if (!calgary_detected) 1507 if (!calgary_detected)
@@ -1532,15 +1514,14 @@ int __init calgary_iommu_init(void)
1532 if (ret) { 1514 if (ret) {
1533 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, " 1515 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1534 "falling back to no_iommu\n", ret); 1516 "falling back to no_iommu\n", ret);
1535 if (max_pfn > MAX_DMA32_PFN)
1536 printk(KERN_ERR "WARNING more than 4GB of memory, "
1537 "32bit PCI may malfunction.\n");
1538 return ret; 1517 return ret;
1539 } 1518 }
1540 1519
1541 force_iommu = 1; 1520 force_iommu = 1;
1542 bad_dma_address = 0x0; 1521 bad_dma_address = 0x0;
1543 dma_ops = &calgary_dma_ops; 1522 /* dma_ops is set to swiotlb or nommu */
1523 if (!dma_ops)
1524 dma_ops = &nommu_dma_ops;
1544 1525
1545 return 0; 1526 return 0;
1546} 1527}
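With the generic DMA layer gaining per-device operations (next file), Calgary drops its translation_enabled() fallbacks from the fast paths and instead installs its ops only on devices that actually sit behind a translation-enabled table; everything else stays on the global swiotlb/nommu ops. The assignment loop from the hunk above, wrapped as a standalone sketch (helper name illustrative):

static void attach_calgary_ops(void)
{
	struct pci_dev *dev = NULL;

	for_each_pci_dev(dev) {
		struct iommu_table *tbl = find_iommu_table(&dev->dev);

		/* only devices behind a translated bus get the IOMMU ops */
		if (translation_enabled(tbl))
			dev->dev.archdata.dma_ops = &calgary_dma_ops;
	}
}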
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 88ddd04cfa98..f704cb51ff82 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,7 +11,7 @@
11 11
12static int forbid_dac __read_mostly; 12static int forbid_dac __read_mostly;
13 13
14const struct dma_mapping_ops *dma_ops; 14struct dma_mapping_ops *dma_ops;
15EXPORT_SYMBOL(dma_ops); 15EXPORT_SYMBOL(dma_ops);
16 16
17static int iommu_sac_force __read_mostly; 17static int iommu_sac_force __read_mostly;
@@ -123,6 +123,14 @@ void __init pci_iommu_alloc(void)
123 123
124 pci_swiotlb_init(); 124 pci_swiotlb_init();
125} 125}
126
127unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
128{
129 unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
130
131 return size >> PAGE_SHIFT;
132}
133EXPORT_SYMBOL(iommu_num_pages);
126#endif 134#endif
127 135
128/* 136/*
@@ -192,126 +200,10 @@ static __init int iommu_setup(char *p)
192} 200}
193early_param("iommu", iommu_setup); 201early_param("iommu", iommu_setup);
194 202
195#ifdef CONFIG_X86_32
196int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
197 dma_addr_t device_addr, size_t size, int flags)
198{
199 void __iomem *mem_base = NULL;
200 int pages = size >> PAGE_SHIFT;
201 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
202
203 if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
204 goto out;
205 if (!size)
206 goto out;
207 if (dev->dma_mem)
208 goto out;
209
210 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
211
212 mem_base = ioremap(bus_addr, size);
213 if (!mem_base)
214 goto out;
215
216 dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
217 if (!dev->dma_mem)
218 goto out;
219 dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
220 if (!dev->dma_mem->bitmap)
221 goto free1_out;
222
223 dev->dma_mem->virt_base = mem_base;
224 dev->dma_mem->device_base = device_addr;
225 dev->dma_mem->size = pages;
226 dev->dma_mem->flags = flags;
227
228 if (flags & DMA_MEMORY_MAP)
229 return DMA_MEMORY_MAP;
230
231 return DMA_MEMORY_IO;
232
233 free1_out:
234 kfree(dev->dma_mem);
235 out:
236 if (mem_base)
237 iounmap(mem_base);
238 return 0;
239}
240EXPORT_SYMBOL(dma_declare_coherent_memory);
241
242void dma_release_declared_memory(struct device *dev)
243{
244 struct dma_coherent_mem *mem = dev->dma_mem;
245
246 if (!mem)
247 return;
248 dev->dma_mem = NULL;
249 iounmap(mem->virt_base);
250 kfree(mem->bitmap);
251 kfree(mem);
252}
253EXPORT_SYMBOL(dma_release_declared_memory);
254
255void *dma_mark_declared_memory_occupied(struct device *dev,
256 dma_addr_t device_addr, size_t size)
257{
258 struct dma_coherent_mem *mem = dev->dma_mem;
259 int pos, err;
260 int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
261
262 pages >>= PAGE_SHIFT;
263
264 if (!mem)
265 return ERR_PTR(-EINVAL);
266
267 pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
268 err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
269 if (err != 0)
270 return ERR_PTR(err);
271 return mem->virt_base + (pos << PAGE_SHIFT);
272}
273EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
274
275static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
276 dma_addr_t *dma_handle, void **ret)
277{
278 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
279 int order = get_order(size);
280
281 if (mem) {
282 int page = bitmap_find_free_region(mem->bitmap, mem->size,
283 order);
284 if (page >= 0) {
285 *dma_handle = mem->device_base + (page << PAGE_SHIFT);
286 *ret = mem->virt_base + (page << PAGE_SHIFT);
287 memset(*ret, 0, size);
288 }
289 if (mem->flags & DMA_MEMORY_EXCLUSIVE)
290 *ret = NULL;
291 }
292 return (mem != NULL);
293}
294
295static int dma_release_coherent(struct device *dev, int order, void *vaddr)
296{
297 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
298
299 if (mem && vaddr >= mem->virt_base && vaddr <
300 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
301 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
302
303 bitmap_release_region(mem->bitmap, page, order);
304 return 1;
305 }
306 return 0;
307}
308#else
309#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
310#define dma_release_coherent(dev, order, vaddr) (0)
311#endif /* CONFIG_X86_32 */
312
313int dma_supported(struct device *dev, u64 mask) 203int dma_supported(struct device *dev, u64 mask)
314{ 204{
205 struct dma_mapping_ops *ops = get_dma_ops(dev);
206
315#ifdef CONFIG_PCI 207#ifdef CONFIG_PCI
316 if (mask > 0xffffffff && forbid_dac > 0) { 208 if (mask > 0xffffffff && forbid_dac > 0) {
317 dev_info(dev, "PCI: Disallowing DAC for device\n"); 209 dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +211,8 @@ int dma_supported(struct device *dev, u64 mask)
319 } 211 }
320#endif 212#endif
321 213
322 if (dma_ops->dma_supported) 214 if (ops->dma_supported)
323 return dma_ops->dma_supported(dev, mask); 215 return ops->dma_supported(dev, mask);
324 216
325 /* Copied from i386. Doesn't make much sense, because it will 217 /* Copied from i386. Doesn't make much sense, because it will
326 only work for pci_alloc_coherent. 218 only work for pci_alloc_coherent.
@@ -367,6 +259,7 @@ void *
367dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 259dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
368 gfp_t gfp) 260 gfp_t gfp)
369{ 261{
262 struct dma_mapping_ops *ops = get_dma_ops(dev);
370 void *memory = NULL; 263 void *memory = NULL;
371 struct page *page; 264 struct page *page;
372 unsigned long dma_mask = 0; 265 unsigned long dma_mask = 0;
@@ -376,7 +269,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
376 /* ignore region specifiers */ 269 /* ignore region specifiers */
377 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 270 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
378 271
379 if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) 272 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
380 return memory; 273 return memory;
381 274
382 if (!dev) { 275 if (!dev) {
@@ -435,8 +328,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
435 /* Let low level make its own zone decisions */ 328 /* Let low level make its own zone decisions */
436 gfp &= ~(GFP_DMA32|GFP_DMA); 329 gfp &= ~(GFP_DMA32|GFP_DMA);
437 330
438 if (dma_ops->alloc_coherent) 331 if (ops->alloc_coherent)
439 return dma_ops->alloc_coherent(dev, size, 332 return ops->alloc_coherent(dev, size,
440 dma_handle, gfp); 333 dma_handle, gfp);
441 return NULL; 334 return NULL;
442 } 335 }
@@ -448,14 +341,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
448 } 341 }
449 } 342 }
450 343
451 if (dma_ops->alloc_coherent) { 344 if (ops->alloc_coherent) {
452 free_pages((unsigned long)memory, get_order(size)); 345 free_pages((unsigned long)memory, get_order(size));
453 gfp &= ~(GFP_DMA|GFP_DMA32); 346 gfp &= ~(GFP_DMA|GFP_DMA32);
454 return dma_ops->alloc_coherent(dev, size, dma_handle, gfp); 347 return ops->alloc_coherent(dev, size, dma_handle, gfp);
455 } 348 }
456 349
457 if (dma_ops->map_simple) { 350 if (ops->map_simple) {
458 *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory), 351 *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
459 size, 352 size,
460 PCI_DMA_BIDIRECTIONAL); 353 PCI_DMA_BIDIRECTIONAL);
461 if (*dma_handle != bad_dma_address) 354 if (*dma_handle != bad_dma_address)
@@ -477,12 +370,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
477void dma_free_coherent(struct device *dev, size_t size, 370void dma_free_coherent(struct device *dev, size_t size,
478 void *vaddr, dma_addr_t bus) 371 void *vaddr, dma_addr_t bus)
479{ 372{
373 struct dma_mapping_ops *ops = get_dma_ops(dev);
374
480 int order = get_order(size); 375 int order = get_order(size);
481 WARN_ON(irqs_disabled()); /* for portability */ 376 WARN_ON(irqs_disabled()); /* for portability */
482 if (dma_release_coherent(dev, order, vaddr)) 377 if (dma_release_from_coherent(dev, order, vaddr))
483 return; 378 return;
484 if (dma_ops->unmap_single) 379 if (ops->unmap_single)
485 dma_ops->unmap_single(dev, bus, size, 0); 380 ops->unmap_single(dev, bus, size, 0);
486 free_pages((unsigned long)vaddr, order); 381 free_pages((unsigned long)vaddr, order);
487} 382}
488EXPORT_SYMBOL(dma_free_coherent); 383EXPORT_SYMBOL(dma_free_coherent);
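
Editor's note: the dma_supported()/dma_alloc_coherent()/dma_free_coherent() hunks above replace direct use of the global dma_ops table with a per-device lookup through get_dma_ops(). The helper itself lives in a header outside this diff; a rough sketch of the shape it is expected to take — the archdata field name is an assumption, not something this patch shows:

    /* Sketch only: fall back to the global dma_mapping_ops table when the
     * device carries no private ops (assumed to hang off dev->archdata). */
    static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
    {
    	if (!dev || !dev->archdata.dma_ops)
    		return dma_ops;			/* global default table */
    	return dev->archdata.dma_ops;		/* per-device override */
    }
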
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..49285f8fd4d5 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry;
67 (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) 67 (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
68#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) 68#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
69 69
70#define to_pages(addr, size) \
71 (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
72
73#define EMERGENCY_PAGES 32 /* = 128KB */ 70#define EMERGENCY_PAGES 32 /* = 128KB */
74 71
75#ifdef CONFIG_AGP 72#ifdef CONFIG_AGP
@@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
241static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, 238static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
242 size_t size, int dir) 239 size_t size, int dir)
243{ 240{
244 unsigned long npages = to_pages(phys_mem, size); 241 unsigned long npages = iommu_num_pages(phys_mem, size);
245 unsigned long iommu_page = alloc_iommu(dev, npages); 242 unsigned long iommu_page = alloc_iommu(dev, npages);
246 int i; 243 int i;
247 244
@@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
304 return; 301 return;
305 302
306 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; 303 iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
307 npages = to_pages(dma_addr, size); 304 npages = iommu_num_pages(dma_addr, size);
308 for (i = 0; i < npages; i++) { 305 for (i = 0; i < npages; i++) {
309 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; 306 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
310 CLEAR_LEAK(iommu_page + i); 307 CLEAR_LEAK(iommu_page + i);
@@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
387 } 384 }
388 385
389 addr = phys_addr; 386 addr = phys_addr;
390 pages = to_pages(s->offset, s->length); 387 pages = iommu_num_pages(s->offset, s->length);
391 while (pages--) { 388 while (pages--) {
392 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); 389 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
393 SET_LEAK(iommu_page); 390 SET_LEAK(iommu_page);
@@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
470 467
471 seg_size += s->length; 468 seg_size += s->length;
472 need = nextneed; 469 need = nextneed;
473 pages += to_pages(s->offset, s->length); 470 pages += iommu_num_pages(s->offset, s->length);
474 ps = s; 471 ps = s;
475 } 472 }
476 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) 473 if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
@@ -692,8 +689,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
692 689
693extern int agp_amd64_init(void); 690extern int agp_amd64_init(void);
694 691
695static const struct dma_mapping_ops gart_dma_ops = { 692static struct dma_mapping_ops gart_dma_ops = {
696 .mapping_error = NULL,
697 .map_single = gart_map_single, 693 .map_single = gart_map_single,
698 .map_simple = gart_map_simple, 694 .map_simple = gart_map_simple,
699 .unmap_single = gart_unmap_single, 695 .unmap_single = gart_unmap_single,
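
Editor's note: pci-gart_64.c (like amd_iommu.c earlier in the series) drops its private to_pages() macro in favour of a shared iommu_num_pages() helper. Assuming the helper keeps the arithmetic of the removed macro, it is equivalent to the sketch below; round_up(), PAGE_MASK and PAGE_SHIFT are the usual kernel definitions.

    /* Number of IOMMU pages spanned by a buffer at (addr, size), counting
     * the partial pages at both ends — same arithmetic as the removed
     * to_pages() macro. Sketch under that assumption. */
    static unsigned long iommu_num_pages(unsigned long addr, unsigned long size)
    {
    	return round_up((addr & ~PAGE_MASK) + size, PAGE_SIZE) >> PAGE_SHIFT;
    }

For example, a 100-byte buffer that starts 10 bytes before a page boundary spans two IOMMU pages.
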
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff3..3f91f71cdc3e 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
72 return nents; 72 return nents;
73} 73}
74 74
75/* Make sure we keep the same behaviour */ 75struct dma_mapping_ops nommu_dma_ops = {
76static int nommu_mapping_error(dma_addr_t dma_addr)
77{
78#ifdef CONFIG_X86_32
79 return 0;
80#else
81 return (dma_addr == bad_dma_address);
82#endif
83}
84
85
86const struct dma_mapping_ops nommu_dma_ops = {
87 .map_single = nommu_map_single, 76 .map_single = nommu_map_single,
88 .map_sg = nommu_map_sg, 77 .map_sg = nommu_map_sg,
89 .mapping_error = nommu_mapping_error,
90 .is_phys = 1, 78 .is_phys = 1,
91}; 79};
92 80
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c20..c4ce0332759e 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
18 return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction); 18 return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
19} 19}
20 20
21const struct dma_mapping_ops swiotlb_dma_ops = { 21struct dma_mapping_ops swiotlb_dma_ops = {
22 .mapping_error = swiotlb_dma_mapping_error, 22 .mapping_error = swiotlb_dma_mapping_error,
23 .alloc_coherent = swiotlb_alloc_coherent, 23 .alloc_coherent = swiotlb_alloc_coherent,
24 .free_coherent = swiotlb_free_coherent, 24 .free_coherent = swiotlb_free_coherent,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 06a9f643817e..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
414 414
415 /* The boot cpu is always logical cpu 0 */ 415 /* The boot cpu is always logical cpu 0 */
416 int reboot_cpu_id = 0; 416 int reboot_cpu_id = 0;
417 cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
418 417
419#ifdef CONFIG_X86_32 418#ifdef CONFIG_X86_32
420 /* See if there has been given a command line override */ 419 /* See if there has been given a command line override */
421 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && 420 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
422 cpu_online(reboot_cpu)) { 421 cpu_online(reboot_cpu))
423 reboot_cpu_id = reboot_cpu; 422 reboot_cpu_id = reboot_cpu;
424 cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
425 }
426#endif 423#endif
427 424
428 /* Make certain the cpu I'm about to reboot on is online */ 425 /* Make certain the cpu I'm about to reboot on is online */
429 if (!cpu_online(reboot_cpu_id)) { 426 if (!cpu_online(reboot_cpu_id))
430 reboot_cpu_id = smp_processor_id(); 427 reboot_cpu_id = smp_processor_id();
431 cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
432 }
433 428
434 /* Make certain I only run on the appropriate processor */ 429 /* Make certain I only run on the appropriate processor */
435 set_cpus_allowed_ptr(current, newmask); 430 set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
436 431
437 /* O.K Now that I'm on the appropriate processor, 432 /* O.K Now that I'm on the appropriate processor,
438 * stop all of the others. 433 * stop all of the others.
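
Editor's note: the reboot path above is one of several callers converted from the cpumask_of_cpu_ptr() temporaries to taking the address of cpumask_of_cpu() directly. Out of context, the idiom reduces to the sketch below (the surrounding work is elided):

    /* Pin the current task to one CPU before doing CPU-local work; this is
     * the pattern native_machine_shutdown() uses above once the
     * cpumask_of_cpu_ptr temporaries are gone. Illustrative only. */
    static void run_on_cpu(int cpu)
    {
    	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
    	/* ... CPU-local work runs here ... */
    }
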
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index c30fe25d470d..703310a99023 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -20,11 +20,44 @@
20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
21#define PAE_PGD_ATTR (_PAGE_PRESENT) 21#define PAE_PGD_ATTR (_PAGE_PRESENT)
22 22
23/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are
24 * used to save some data for jumping back
25 */
26#define DATA(offset) (PAGE_SIZE/2+(offset))
27
28/* Minimal CPU state */
29#define ESP DATA(0x0)
30#define CR0 DATA(0x4)
31#define CR3 DATA(0x8)
32#define CR4 DATA(0xc)
33
34/* other data */
35#define CP_VA_CONTROL_PAGE DATA(0x10)
36#define CP_PA_PGD DATA(0x14)
37#define CP_PA_SWAP_PAGE DATA(0x18)
38#define CP_PA_BACKUP_PAGES_MAP DATA(0x1c)
39
23 .text 40 .text
24 .align PAGE_SIZE 41 .align PAGE_SIZE
25 .globl relocate_kernel 42 .globl relocate_kernel
26relocate_kernel: 43relocate_kernel:
27 movl 8(%esp), %ebp /* list of pages */ 44 /* Save the CPU context, used for jumping back */
45
46 pushl %ebx
47 pushl %esi
48 pushl %edi
49 pushl %ebp
50 pushf
51
52 movl 20+8(%esp), %ebp /* list of pages */
53 movl PTR(VA_CONTROL_PAGE)(%ebp), %edi
54 movl %esp, ESP(%edi)
55 movl %cr0, %eax
56 movl %eax, CR0(%edi)
57 movl %cr3, %eax
58 movl %eax, CR3(%edi)
59 movl %cr4, %eax
60 movl %eax, CR4(%edi)
28 61
29#ifdef CONFIG_X86_PAE 62#ifdef CONFIG_X86_PAE
30 /* map the control page at its virtual address */ 63 /* map the control page at its virtual address */
@@ -138,15 +171,25 @@ relocate_kernel:
138 171
139relocate_new_kernel: 172relocate_new_kernel:
140 /* read the arguments and say goodbye to the stack */ 173 /* read the arguments and say goodbye to the stack */
141 movl 4(%esp), %ebx /* page_list */ 174 movl 20+4(%esp), %ebx /* page_list */
142 movl 8(%esp), %ebp /* list of pages */ 175 movl 20+8(%esp), %ebp /* list of pages */
143 movl 12(%esp), %edx /* start address */ 176 movl 20+12(%esp), %edx /* start address */
144 movl 16(%esp), %ecx /* cpu_has_pae */ 177 movl 20+16(%esp), %ecx /* cpu_has_pae */
178 movl 20+20(%esp), %esi /* preserve_context */
145 179
146 /* zero out flags, and disable interrupts */ 180 /* zero out flags, and disable interrupts */
147 pushl $0 181 pushl $0
148 popfl 182 popfl
149 183
184 /* save some information for jumping back */
185 movl PTR(VA_CONTROL_PAGE)(%ebp), %edi
186 movl %edi, CP_VA_CONTROL_PAGE(%edi)
187 movl PTR(PA_PGD)(%ebp), %eax
188 movl %eax, CP_PA_PGD(%edi)
189 movl PTR(PA_SWAP_PAGE)(%ebp), %eax
190 movl %eax, CP_PA_SWAP_PAGE(%edi)
191 movl %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
192
150 /* get physical address of control page now */ 193 /* get physical address of control page now */
151 /* this is impossible after page table switch */ 194 /* this is impossible after page table switch */
152 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi 195 movl PTR(PA_CONTROL_PAGE)(%ebp), %edi
@@ -197,8 +240,90 @@ identity_mapped:
197 xorl %eax, %eax 240 xorl %eax, %eax
198 movl %eax, %cr3 241 movl %eax, %cr3
199 242
243 movl CP_PA_SWAP_PAGE(%edi), %eax
244 pushl %eax
245 pushl %ebx
246 call swap_pages
247 addl $8, %esp
248
249 /* To be certain of avoiding problems with self-modifying code
250 * I need to execute a serializing instruction here.
251 * So I flush the TLB, it's handy, and not processor dependent.
252 */
253 xorl %eax, %eax
254 movl %eax, %cr3
255
256 /* set all of the registers to known values */
257 /* leave %esp alone */
258
259 testl %esi, %esi
260 jnz 1f
261 xorl %edi, %edi
262 xorl %eax, %eax
263 xorl %ebx, %ebx
264 xorl %ecx, %ecx
265 xorl %edx, %edx
266 xorl %esi, %esi
267 xorl %ebp, %ebp
268 ret
2691:
270 popl %edx
271 movl CP_PA_SWAP_PAGE(%edi), %esp
272 addl $PAGE_SIZE, %esp
2732:
274 call *%edx
275
276 /* get the re-entry point of the peer system */
277 movl 0(%esp), %ebp
278 call 1f
2791:
280 popl %ebx
281 subl $(1b - relocate_kernel), %ebx
282 movl CP_VA_CONTROL_PAGE(%ebx), %edi
283 lea PAGE_SIZE(%ebx), %esp
284 movl CP_PA_SWAP_PAGE(%ebx), %eax
285 movl CP_PA_BACKUP_PAGES_MAP(%ebx), %edx
286 pushl %eax
287 pushl %edx
288 call swap_pages
289 addl $8, %esp
290 movl CP_PA_PGD(%ebx), %eax
291 movl %eax, %cr3
292 movl %cr0, %eax
293 orl $(1<<31), %eax
294 movl %eax, %cr0
295 lea PAGE_SIZE(%edi), %esp
296 movl %edi, %eax
297 addl $(virtual_mapped - relocate_kernel), %eax
298 pushl %eax
299 ret
300
301virtual_mapped:
302 movl CR4(%edi), %eax
303 movl %eax, %cr4
304 movl CR3(%edi), %eax
305 movl %eax, %cr3
306 movl CR0(%edi), %eax
307 movl %eax, %cr0
308 movl ESP(%edi), %esp
309 movl %ebp, %eax
310
311 popf
312 popl %ebp
313 popl %edi
314 popl %esi
315 popl %ebx
316 ret
317
200 /* Do the copies */ 318 /* Do the copies */
201 movl %ebx, %ecx 319swap_pages:
320 movl 8(%esp), %edx
321 movl 4(%esp), %ecx
322 pushl %ebp
323 pushl %ebx
324 pushl %edi
325 pushl %esi
326 movl %ecx, %ebx
202 jmp 1f 327 jmp 1f
203 328
2040: /* top, read another word from the indirection page */ 3290: /* top, read another word from the indirection page */
@@ -226,27 +351,28 @@ identity_mapped:
226 movl %ecx, %esi /* For every source page do a copy */ 351 movl %ecx, %esi /* For every source page do a copy */
227 andl $0xfffff000, %esi 352 andl $0xfffff000, %esi
228 353
354 movl %edi, %eax
355 movl %esi, %ebp
356
357 movl %edx, %edi
229 movl $1024, %ecx 358 movl $1024, %ecx
230 rep ; movsl 359 rep ; movsl
231 jmp 0b
232 360
2333: 361 movl %ebp, %edi
234 362 movl %eax, %esi
235 /* To be certain of avoiding problems with self-modifying code 363 movl $1024, %ecx
236 * I need to execute a serializing instruction here. 364 rep ; movsl
237 * So I flush the TLB, it's handy, and not processor dependent.
238 */
239 xorl %eax, %eax
240 movl %eax, %cr3
241 365
242 /* set all of the registers to known values */ 366 movl %eax, %edi
243 /* leave %esp alone */ 367 movl %edx, %esi
368 movl $1024, %ecx
369 rep ; movsl
244 370
245 xorl %eax, %eax 371 lea PAGE_SIZE(%ebp), %esi
246 xorl %ebx, %ebx 372 jmp 0b
247 xorl %ecx, %ecx 3733:
248 xorl %edx, %edx 374 popl %esi
249 xorl %esi, %esi 375 popl %edi
250 xorl %edi, %edi 376 popl %ebx
251 xorl %ebp, %ebp 377 popl %ebp
252 ret 378 ret
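
Editor's note: the DATA() offsets added at the top of relocate_kernel_32.S describe a small save area in the second quarter of the control page, used by the new jump-back path. Viewed as a C struct it looks like the sketch below; this is purely a reading aid, the assembly only ever uses the raw offsets.

    #include <stdint.h>

    /* Layout of the save area at control_page + PAGE_SIZE/2, per the
     * DATA() offsets in the hunk above. Not compiled anywhere. */
    struct jump_back_state {
    	uint32_t esp;			/* DATA(0x00): minimal CPU state */
    	uint32_t cr0;			/* DATA(0x04) */
    	uint32_t cr3;			/* DATA(0x08) */
    	uint32_t cr4;			/* DATA(0x0c) */
    	uint32_t va_control_page;	/* DATA(0x10): CP_VA_CONTROL_PAGE */
    	uint32_t pa_pgd;		/* DATA(0x14): CP_PA_PGD */
    	uint32_t pa_swap_page;		/* DATA(0x18): CP_PA_SWAP_PAGE */
    	uint32_t pa_backup_pages_map;	/* DATA(0x1c): CP_PA_BACKUP_PAGES_MAP */
    };
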
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b520dae02bf4..2d888586385d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -788,10 +788,6 @@ void __init setup_arch(char **cmdline_p)
788 788
789 initmem_init(0, max_pfn); 789 initmem_init(0, max_pfn);
790 790
791#ifdef CONFIG_X86_64
792 dma32_reserve_bootmem();
793#endif
794
795#ifdef CONFIG_ACPI_SLEEP 791#ifdef CONFIG_ACPI_SLEEP
796 /* 792 /*
797 * Reserve low memory region for sleep support. 793 * Reserve low memory region for sleep support.
@@ -806,6 +802,15 @@ void __init setup_arch(char **cmdline_p)
806#endif 802#endif
807 reserve_crashkernel(); 803 reserve_crashkernel();
808 804
805#ifdef CONFIG_X86_64
806 /*
807 * dma32_reserve_bootmem() allocates bootmem which may conflict
808 * with the crashkernel command line, so do that after
809 * reserve_crashkernel()
810 */
811 dma32_reserve_bootmem();
812#endif
813
809 reserve_ibft_region(); 814 reserve_ibft_region();
810 815
811#ifdef CONFIG_KVM_CLOCK 816#ifdef CONFIG_KVM_CLOCK
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index f7745f94c006..76e305e064f9 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -80,24 +80,6 @@ static void __init setup_per_cpu_maps(void)
80#endif 80#endif
81} 81}
82 82
83#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
84cpumask_t *cpumask_of_cpu_map __read_mostly;
85EXPORT_SYMBOL(cpumask_of_cpu_map);
86
87/* requires nr_cpu_ids to be initialized */
88static void __init setup_cpumask_of_cpu(void)
89{
90 int i;
91
92 /* alloc_bootmem zeroes memory */
93 cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
94 for (i = 0; i < nr_cpu_ids; i++)
95 cpu_set(i, cpumask_of_cpu_map[i]);
96}
97#else
98static inline void setup_cpumask_of_cpu(void) { }
99#endif
100
101#ifdef CONFIG_X86_32 83#ifdef CONFIG_X86_32
102/* 84/*
103 * Great future not-so-futuristic plan: make i386 and x86_64 do it 85 * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -197,9 +179,6 @@ void __init setup_per_cpu_areas(void)
197 179
198 /* Setup node to cpumask map */ 180 /* Setup node to cpumask map */
199 setup_node_to_cpumask_map(); 181 setup_node_to_cpumask_map();
200
201 /* Setup cpumask_of_cpu map */
202 setup_cpumask_of_cpu();
203} 182}
204 183
205#endif 184#endif