Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/cstate.c                    |   3
-rw-r--r--  arch/x86/kernel/amd_iommu.c                      |  13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c       |  10
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c        |  15
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c |  12
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-ich.c      |   3
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c            |   3
-rw-r--r--  arch/x86/kernel/genapic_64.c                     |   1
-rw-r--r--  arch/x86/kernel/ldt.c                            |   6
-rw-r--r--  arch/x86/kernel/microcode.c                      |  17
-rw-r--r--  arch/x86/kernel/pci-dma.c                        | 130
-rw-r--r--  arch/x86/kernel/pci-gart_64.c                    |  11
-rw-r--r--  arch/x86/kernel/reboot.c                         |  11
-rw-r--r--  arch/x86/kernel/setup.c                          |  13
-rw-r--r--  arch/x86/kernel/setup_percpu.c                   |  21
15 files changed, 54 insertions(+), 215 deletions(-)
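
Two changes recur throughout the hunks below: every user of the cpumask_of_cpu_ptr()/cpumask_of_cpu_ptr_declare()/cpumask_of_cpu_ptr_next() helpers is converted to take the address of cpumask_of_cpu() directly, and the duplicated to_pages() macros in the IOMMU code are folded into one shared iommu_num_pages() helper (added in the pci-dma.c hunk). The sketch below shows the converted affinity pattern in isolation; it is illustrative only, and run_on_cpu() is a hypothetical name, not something this patch adds:

    /* Minimal sketch, assuming the 2.6.26-era cpumask API seen in the hunks. */
    static int run_on_cpu(unsigned int cpu)         /* hypothetical helper */
    {
            cpumask_t saved_mask = current->cpus_allowed;
            int retval;

            /* was: cpumask_of_cpu_ptr(new_mask, cpu);
             *      set_cpus_allowed_ptr(current, new_mask); */
            retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
            if (retval)
                    return retval;

            /* ... per-CPU work: MSR/CPUID access, microcode load, ... */

            set_cpus_allowed_ptr(current, &saved_mask);
            return 0;
    }

As in the converted call sites, current->cpus_allowed is saved and restored around the per-CPU work.
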
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 9220cf46aa10..c2502eb9aa83 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -73,7 +73,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
         struct cpuinfo_x86 *c = &cpu_data(cpu);
 
         cpumask_t saved_mask;
-        cpumask_of_cpu_ptr(new_mask, cpu);
         int retval;
         unsigned int eax, ebx, ecx, edx;
         unsigned int edx_part;
@@ -92,7 +91,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 
         /* Make sure we are running on right CPU */
         saved_mask = current->cpus_allowed;
-        retval = set_cpus_allowed_ptr(current, new_mask);
+        retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
         if (retval)
                 return -1;
 
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 74697408576f..22d7d050905d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -29,9 +29,6 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define to_pages(addr, size) \
-        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EXIT_LOOP_COUNT 10000000
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
@@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                              u64 address, size_t size)
 {
         int s = 0;
-        unsigned pages = to_pages(address, size);
+        unsigned pages = iommu_num_pages(address, size);
 
         address &= PAGE_MASK;
 
@@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
         if (iommu->exclusion_start &&
             iommu->exclusion_start < dma_dom->aperture_size) {
                 unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
-                int pages = to_pages(iommu->exclusion_start,
-                                     iommu->exclusion_length);
+                int pages = iommu_num_pages(iommu->exclusion_start,
+                                            iommu->exclusion_length);
                 dma_ops_reserve_addresses(dma_dom, startpage, pages);
         }
 
@@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev,
         unsigned int pages;
         int i;
 
-        pages = to_pages(paddr, size);
+        pages = iommu_num_pages(paddr, size);
         paddr &= PAGE_MASK;
 
         address = dma_ops_alloc_addresses(dev, dma_dom, pages);
@@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu,
         if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
                 return;
 
-        pages = to_pages(dma_addr, size);
+        pages = iommu_num_pages(dma_addr, size);
         dma_addr &= PAGE_MASK;
         start = dma_addr;
 
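
All four amd_iommu.c call sites above now use iommu_num_pages(); its definition appears in the pci-dma.c hunk further down. A userspace mirror of the arithmetic (illustrative only, with PAGE_SIZE fixed at 4 KiB here) shows why even a 2-byte buffer can need two pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* mirror of the kernel helper: round the in-page offset plus the
     * length up to a whole number of pages */
    static unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
    {
            unsigned long size = ((addr & ~PAGE_MASK) + len + PAGE_SIZE - 1)
                                 & PAGE_MASK;
            return size >> PAGE_SHIFT;
    }

    int main(void)
    {
            printf("%lu\n", iommu_num_pages(0x1fff, 2));    /* 2: straddles a page boundary */
            printf("%lu\n", iommu_num_pages(0x2000, 4096)); /* 1: aligned, exactly one page */
            return 0;
    }
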
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ff2fff56f0a8..dd097b835839 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd)
 static void drv_write(struct drv_cmd *cmd)
 {
         cpumask_t saved_mask = current->cpus_allowed;
-        cpumask_of_cpu_ptr_declare(cpu_mask);
         unsigned int i;
 
         for_each_cpu_mask_nr(i, cmd->mask) {
-                cpumask_of_cpu_ptr_next(cpu_mask, i);
-                set_cpus_allowed_ptr(current, cpu_mask);
+                set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
                 do_drv_write(cmd);
         }
 
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu)
         } aperf_cur, mperf_cur;
 
         cpumask_t saved_mask;
-        cpumask_of_cpu_ptr(cpu_mask, cpu);
         unsigned int perf_percent;
         unsigned int retval;
 
         saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, cpu_mask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
         if (get_cpu() != cpu) {
                 /* We were not able to run on requested processor */
                 put_cpu();
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-        cpumask_of_cpu_ptr(cpu_mask, cpu);
         struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
         unsigned int freq;
         unsigned int cached_freq;
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
         }
 
         cached_freq = data->freq_table[data->acpi_data->state].frequency;
-        freq = extract_freq(get_cur_val(cpu_mask), data);
+        freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
         if (freq != cached_freq) {
                 /*
                  * The dreaded BIOS frequency change behind our back.
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 53c7b6936973..c45ca6d4dce1 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -479,12 +479,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi
 static int check_supported_cpu(unsigned int cpu)
 {
         cpumask_t oldmask;
-        cpumask_of_cpu_ptr(cpu_mask, cpu);
         u32 eax, ebx, ecx, edx;
         unsigned int rc = 0;
 
         oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, cpu_mask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
         if (smp_processor_id() != cpu) {
                 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
@@ -1017,7 +1016,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
 {
         cpumask_t oldmask;
-        cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
         struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
         u32 checkfid;
         u32 checkvid;
@@ -1032,7 +1030,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
         /* only run on specific CPU from here on */
         oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, cpu_mask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
         if (smp_processor_id() != pol->cpu) {
                 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1107,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
         struct powernow_k8_data *data;
         cpumask_t oldmask;
-        cpumask_of_cpu_ptr_declare(newmask);
         int rc;
 
         if (!cpu_online(pol->cpu))
@@ -1159,8 +1156,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
         /* only run on specific CPU from here on */
         oldmask = current->cpus_allowed;
-        cpumask_of_cpu_ptr_next(newmask, pol->cpu);
-        set_cpus_allowed_ptr(current, newmask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
 
         if (smp_processor_id() != pol->cpu) {
                 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1182,7 +1178,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         set_cpus_allowed_ptr(current, &oldmask);
 
         if (cpu_family == CPU_HW_PSTATE)
-                pol->cpus = *newmask;
+                pol->cpus = cpumask_of_cpu(pol->cpu);
         else
                 pol->cpus = per_cpu(cpu_core_map, pol->cpu);
         data->available_cores = &(pol->cpus);
@@ -1248,7 +1244,6 @@ static unsigned int powernowk8_get (unsigned int cpu)
 {
         struct powernow_k8_data *data;
         cpumask_t oldmask = current->cpus_allowed;
-        cpumask_of_cpu_ptr(newmask, cpu);
         unsigned int khz = 0;
         unsigned int first;
 
@@ -1258,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
         if (!data)
                 return -EINVAL;
 
-        set_cpus_allowed_ptr(current, newmask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
         if (smp_processor_id() != cpu) {
                 printk(KERN_ERR PFX
                         "limiting to CPU %d failed in powernowk8_get\n", cpu);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index ca2ac13b7af2..15e13c01cc36 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
         unsigned l, h;
         unsigned clock_freq;
         cpumask_t saved_mask;
-        cpumask_of_cpu_ptr(new_mask, cpu);
 
         saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, new_mask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
         if (smp_processor_id() != cpu)
                 return 0;
 
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy,
          * Best effort undo..
          */
 
-        if (!cpus_empty(*covered_cpus)) {
-                cpumask_of_cpu_ptr_declare(new_mask);
-
+        if (!cpus_empty(*covered_cpus))
                 for_each_cpu_mask_nr(j, *covered_cpus) {
-                        cpumask_of_cpu_ptr_next(new_mask, j);
-                        set_cpus_allowed_ptr(current, new_mask);
+                        set_cpus_allowed_ptr(current,
+                                             &cpumask_of_cpu(j));
                         wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
                 }
-        }
 
         tmp = freqs.new;
         freqs.new = freqs.old;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 2f3728dc24f6..191f7263c61d 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-        cpumask_of_cpu_ptr(newmask, cpu);
-        return _speedstep_get(newmask);
+        return _speedstep_get(&cpumask_of_cpu(cpu));
 }
 
 /**
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 650d40f7912b..6b0a10b002f1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -516,7 +516,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
         unsigned long j;
         int retval;
         cpumask_t oldmask;
-        cpumask_of_cpu_ptr(newmask, cpu);
 
         if (num_cache_leaves == 0)
                 return -ENOENT;
@@ -527,7 +526,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
                 return -ENOMEM;
 
         oldmask = current->cpus_allowed;
-        retval = set_cpus_allowed_ptr(current, newmask);
+        retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
         if (retval)
                 goto out;
 
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 1fa8be5bd217..eaff0bbb1444 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -99,3 +99,4 @@ int is_uv_system(void)
 {
         return uv_system_type != UV_NONE;
 }
+EXPORT_SYMBOL_GPL(is_uv_system);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 3fee2aa50f3f..b68e21f06f4f 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -62,12 +62,10 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 
         if (reload) {
 #ifdef CONFIG_SMP
-                cpumask_of_cpu_ptr_declare(mask);
-
                 preempt_disable();
                 load_LDT(pc);
-                cpumask_of_cpu_ptr_next(mask, smp_processor_id());
-                if (!cpus_equal(current->mm->cpu_vm_mask, *mask))
+                if (!cpus_equal(current->mm->cpu_vm_mask,
+                                cpumask_of_cpu(smp_processor_id())))
                         smp_call_function(flush_ldt, current->mm, 1);
                 preempt_enable();
 #else
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 6994c751590e..652fa5c38ebe 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -388,7 +388,6 @@ static int do_microcode_update (void)
         void *new_mc = NULL;
         int cpu;
         cpumask_t old;
-        cpumask_of_cpu_ptr_declare(newmask);
 
         old = current->cpus_allowed;
 
@@ -405,8 +404,7 @@
 
                 if (!uci->valid)
                         continue;
-                cpumask_of_cpu_ptr_next(newmask, cpu);
-                set_cpus_allowed_ptr(current, newmask);
+                set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                 error = get_maching_microcode(new_mc, cpu);
                 if (error < 0)
                         goto out;
@@ -576,7 +574,6 @@ static int apply_microcode_check_cpu(int cpu)
         struct cpuinfo_x86 *c = &cpu_data(cpu);
         struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
         cpumask_t old;
-        cpumask_of_cpu_ptr(newmask, cpu);
         unsigned int val[2];
         int err = 0;
 
@@ -585,7 +582,7 @@ static int apply_microcode_check_cpu(int cpu)
                 return 0;
 
         old = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, newmask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
         /* Check if the microcode we have in memory matches the CPU */
         if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
@@ -623,12 +620,11 @@
 static void microcode_init_cpu(int cpu, int resume)
 {
         cpumask_t old;
-        cpumask_of_cpu_ptr(newmask, cpu);
         struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
         old = current->cpus_allowed;
 
-        set_cpus_allowed_ptr(current, newmask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
         mutex_lock(&microcode_mutex);
         collect_cpu_info(cpu);
         if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
@@ -661,13 +657,10 @@ static ssize_t reload_store(struct sys_device *dev,
         if (end == buf)
                 return -EINVAL;
         if (val == 1) {
-                cpumask_t old;
-                cpumask_of_cpu_ptr(newmask, cpu);
-
-                old = current->cpus_allowed;
+                cpumask_t old = current->cpus_allowed;
 
                 get_online_cpus();
-                set_cpus_allowed_ptr(current, newmask);
+                set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 
                 mutex_lock(&microcode_mutex);
                 if (uci->valid)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 37544123896d..87d4d6964ec2 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -123,6 +123,14 @@ void __init pci_iommu_alloc(void)
 
         pci_swiotlb_init();
 }
+
+unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+{
+        unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
+
+        return size >> PAGE_SHIFT;
+}
+EXPORT_SYMBOL(iommu_num_pages);
 #endif
 
 /*
@@ -192,124 +200,6 @@ static __init int iommu_setup(char *p)
 }
 early_param("iommu", iommu_setup);
 
-#ifdef CONFIG_X86_32
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-                                dma_addr_t device_addr, size_t size, int flags)
-{
-        void __iomem *mem_base = NULL;
-        int pages = size >> PAGE_SHIFT;
-        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-
-        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-                goto out;
-        if (!size)
-                goto out;
-        if (dev->dma_mem)
-                goto out;
-
-        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
-        mem_base = ioremap(bus_addr, size);
-        if (!mem_base)
-                goto out;
-
-        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-        if (!dev->dma_mem)
-                goto out;
-        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-        if (!dev->dma_mem->bitmap)
-                goto free1_out;
-
-        dev->dma_mem->virt_base = mem_base;
-        dev->dma_mem->device_base = device_addr;
-        dev->dma_mem->size = pages;
-        dev->dma_mem->flags = flags;
-
-        if (flags & DMA_MEMORY_MAP)
-                return DMA_MEMORY_MAP;
-
-        return DMA_MEMORY_IO;
-
- free1_out:
-        kfree(dev->dma_mem);
- out:
-        if (mem_base)
-                iounmap(mem_base);
-        return 0;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-        struct dma_coherent_mem *mem = dev->dma_mem;
-
-        if (!mem)
-                return;
-        dev->dma_mem = NULL;
-        iounmap(mem->virt_base);
-        kfree(mem->bitmap);
-        kfree(mem);
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                                        dma_addr_t device_addr, size_t size)
-{
-        struct dma_coherent_mem *mem = dev->dma_mem;
-        int pos, err;
-        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
-
-        pages >>= PAGE_SHIFT;
-
-        if (!mem)
-                return ERR_PTR(-EINVAL);
-
-        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-        if (err != 0)
-                return ERR_PTR(err);
-        return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
-                                       dma_addr_t *dma_handle, void **ret)
-{
-        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-        int order = get_order(size);
-
-        if (mem) {
-                int page = bitmap_find_free_region(mem->bitmap, mem->size,
-                                                   order);
-                if (page >= 0) {
-                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
-                        *ret = mem->virt_base + (page << PAGE_SHIFT);
-                        memset(*ret, 0, size);
-                }
-                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-                        *ret = NULL;
-        }
-        return (mem != NULL);
-}
-
-static int dma_release_coherent(struct device *dev, int order, void *vaddr)
-{
-        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-
-        if (mem && vaddr >= mem->virt_base && vaddr <
-                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-
-                bitmap_release_region(mem->bitmap, page, order);
-                return 1;
-        }
-        return 0;
-}
-#else
-#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
-#define dma_release_coherent(dev, order, vaddr) (0)
-#endif /* CONFIG_X86_32 */
-
 int dma_supported(struct device *dev, u64 mask)
 {
         struct dma_mapping_ops *ops = get_dma_ops(dev);
@@ -379,7 +269,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
         /* ignore region specifiers */
         gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 
-        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
+        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                 return memory;
 
         if (!dev) {
@@ -484,7 +374,7 @@ void dma_free_coherent(struct device *dev, size_t size,
 
         int order = get_order(size);
         WARN_ON(irqs_disabled()); /* for portability */
-        if (dma_release_coherent(dev, order, vaddr))
+        if (dma_release_from_coherent(dev, order, vaddr))
                 return;
         if (ops->unmap_single)
                 ops->unmap_single(dev, bus, size, 0);
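
The large removal above does not drop functionality: the per-device coherent-pool code was superseded by the generic implementation merged around the same time (kernel/dma-coherent.c in that era), which is why dma_alloc_coherent()/dma_free_coherent() now call the renamed dma_alloc_from_coherent()/dma_release_from_coherent(). The driver-facing API is unchanged. A hedged sketch of how a driver declares such a pool follows; the EXAMPLE_* addresses and the probe function are hypothetical, while the signature, flags, and return convention come from the removed x86 copy above:

    /* Sketch only. EXAMPLE_PHYS_BASE/EXAMPLE_DEV_BASE are hypothetical. */
    static int example_probe(struct device *dev)
    {
            int rc;

            rc = dma_declare_coherent_memory(dev,
                                             EXAMPLE_PHYS_BASE, /* bus address */
                                             EXAMPLE_DEV_BASE,  /* device-visible address */
                                             0x10000,           /* 64 KiB pool */
                                             DMA_MEMORY_MAP);
            if (rc != DMA_MEMORY_MAP)
                    return -ENOMEM;

            /* subsequent dma_alloc_coherent(dev, ...) calls are satisfied
             * from this pool before any other allocation path is tried */
            return 0;
    }
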
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 744126e64950..49285f8fd4d5 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry;
         (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define to_pages(addr, size) \
-        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EMERGENCY_PAGES 32 /* = 128KB */
 
 #ifdef CONFIG_AGP
@@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                                size_t size, int dir)
 {
-        unsigned long npages = to_pages(phys_mem, size);
+        unsigned long npages = iommu_num_pages(phys_mem, size);
         unsigned long iommu_page = alloc_iommu(dev, npages);
         int i;
 
@@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                 return;
 
         iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
-        npages = to_pages(dma_addr, size);
+        npages = iommu_num_pages(dma_addr, size);
         for (i = 0; i < npages; i++) {
                 iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                 CLEAR_LEAK(iommu_page + i);
@@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
         }
 
         addr = phys_addr;
-        pages = to_pages(s->offset, s->length);
+        pages = iommu_num_pages(s->offset, s->length);
         while (pages--) {
                 iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                 SET_LEAK(iommu_page);
@@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
                 seg_size += s->length;
                 need = nextneed;
-                pages += to_pages(s->offset, s->length);
+                pages += iommu_num_pages(s->offset, s->length);
                 ps = s;
         }
         if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 06a9f643817e..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -414,25 +414,20 @@ void native_machine_shutdown(void)
 
         /* The boot cpu is always logical cpu 0 */
         int reboot_cpu_id = 0;
-        cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
 
 #ifdef CONFIG_X86_32
         /* See if there has been given a command line override */
         if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
-                cpu_online(reboot_cpu)) {
+                cpu_online(reboot_cpu))
                 reboot_cpu_id = reboot_cpu;
-                cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-        }
 #endif
 
         /* Make certain the cpu I'm about to reboot on is online */
-        if (!cpu_online(reboot_cpu_id)) {
+        if (!cpu_online(reboot_cpu_id))
                 reboot_cpu_id = smp_processor_id();
-                cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
-        }
 
         /* Make certain I only run on the appropriate processor */
-        set_cpus_allowed_ptr(current, newmask);
+        set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
 
         /* O.K Now that I'm on the appropriate processor,
          * stop all of the others.
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b520dae02bf4..2d888586385d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -788,10 +788,6 @@ void __init setup_arch(char **cmdline_p)
 
         initmem_init(0, max_pfn);
 
-#ifdef CONFIG_X86_64
-        dma32_reserve_bootmem();
-#endif
-
 #ifdef CONFIG_ACPI_SLEEP
         /*
          * Reserve low memory region for sleep support.
@@ -806,6 +802,15 @@
 #endif
         reserve_crashkernel();
 
+#ifdef CONFIG_X86_64
+        /*
+         * dma32_reserve_bootmem() allocates bootmem which may conflict
+         * with the crashkernel command line, so do that after
+         * reserve_crashkernel()
+         */
+        dma32_reserve_bootmem();
+#endif
+
         reserve_ibft_region();
 
 #ifdef CONFIG_KVM_CLOCK
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index f7745f94c006..76e305e064f9 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -80,24 +80,6 @@ static void __init setup_per_cpu_maps(void)
 #endif
 }
 
-#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
-cpumask_t *cpumask_of_cpu_map __read_mostly;
-EXPORT_SYMBOL(cpumask_of_cpu_map);
-
-/* requires nr_cpu_ids to be initialized */
-static void __init setup_cpumask_of_cpu(void)
-{
-        int i;
-
-        /* alloc_bootmem zeroes memory */
-        cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
-        for (i = 0; i < nr_cpu_ids; i++)
-                cpu_set(i, cpumask_of_cpu_map[i]);
-}
-#else
-static inline void setup_cpumask_of_cpu(void) { }
-#endif
-
 #ifdef CONFIG_X86_32
 /*
  * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -197,9 +179,6 @@ void __init setup_per_cpu_areas(void)
 
         /* Setup node to cpumask map */
         setup_node_to_cpumask_map();
-
-        /* Setup cpumask_of_cpu map */
-        setup_cpumask_of_cpu();
 }
 
 #endif
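
The setup_percpu.c hunks remove the bootmem-backed cpumask_of_cpu_map, whose only purpose was to give cpumask_of_cpu() an addressable mask per CPU; with the conversions above, callers take the mask's address directly and the table becomes dead weight. A back-of-envelope sizing sketch (the NR_CPUS value is an assumption, chosen to match large-SMP x86_64 configs) shows what the map cost:

    #include <stdio.h>

    int main(void)
    {
            /* assumption: NR_CPUS == 4096 and nr_cpu_ids == NR_CPUS */
            unsigned long nr_cpus = 4096;
            unsigned long mask_bytes = nr_cpus / 8; /* sizeof(cpumask_t): one bit per CPU */

            printf("%lu KiB of bootmem\n", nr_cpus * mask_bytes / 1024); /* 2048 KiB */
            return 0;
    }

Since the map held one full cpumask_t for every possible CPU, its footprint grew quadratically with NR_CPUS, which is why dropping it pays off most on big configurations.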