author		Linus Torvalds <torvalds@linux-foundation.org>	2009-01-03 15:04:39 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-03 15:04:39 -0500
commit		7d3b56ba37a95f1f370f50258ed3954c304c524b (patch)
tree		86102527b92f02450aa245f084ffb491c18d2e0a /drivers
parent		269b012321f2f1f8e4648c43a93bf432b42c6668 (diff)
parent		ab14398abd195af91a744c320a52a1bce814dd1e (diff)
Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
x86: setup_per_cpu_areas() cleanup
cpumask: fix compile error when CONFIG_NR_CPUS is not defined
cpumask: use alloc_cpumask_var_node where appropriate
cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
x86: use cpumask_var_t in acpi/boot.c
x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
sched: put back some stack hog changes that were undone in kernel/sched.c
x86: enable cpus display of kernel_max and offlined cpus
ia64: cpumask fix for is_affinity_mask_valid()
cpumask: convert RCU implementations, fix
xtensa: define __fls
mn10300: define __fls
m32r: define __fls
h8300: define __fls
frv: define __fls
cris: define __fls
cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
cpumask: zero extra bits in alloc_cpumask_var_node
cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
cpumask: convert mm/
...
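The common thread of the series is replacing fixed-size on-stack cpumask_t variables with cpumask_var_t, which is heap-allocated when CONFIG_CPUMASK_OFFSTACK=y, so stack usage no longer scales with CONFIG_NR_CPUS (up to 4096 CPUs here). A minimal sketch of the pattern, not taken from any one commit in this merge:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Sketch only: with CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a
 * pointer and alloc_cpumask_var() kmallocs the bitmap; otherwise it
 * is an ordinary array and the alloc/free calls compile to no-ops. */
static int example_count_online(void)
{
	cpumask_var_t mask;
	int cpu, count = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	for_each_cpu(cpu, mask)		/* replaces for_each_cpu_mask_nr() */
		count++;

	free_cpumask_var(mask);
	return count;
}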
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/acpi/processor_core.c			14
-rw-r--r--	drivers/acpi/processor_perflib.c		28
-rw-r--r--	drivers/acpi/processor_throttling.c		80
-rw-r--r--	drivers/base/cpu.c				44
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_irq.c		17
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_file_ops.c	8
-rw-r--r--	drivers/pnp/pnpbios/bioscalls.c			2
7 files changed, 135 insertions, 58 deletions
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 34948362f41d..0cc2fd31e376 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device)
 	if (!pr)
 		return -ENOMEM;
 
+	if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+		kfree(pr);
+		return -ENOMEM;
+	}
+
 	pr->handle = device->handle;
 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 
 	pr = acpi_driver_data(device);
 
-	if (pr->id >= nr_cpu_ids) {
-		kfree(pr);
-		return 0;
-	}
+	if (pr->id >= nr_cpu_ids)
+		goto free;
 
 	if (type == ACPI_BUS_REMOVAL_EJECT) {
 		if (acpi_processor_handle_eject(pr))
@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 
 	per_cpu(processors, pr->id) = NULL;
 	per_cpu(processor_device_array, pr->id) = NULL;
+
+free:
+	free_cpumask_var(pr->throttling.shared_cpu_map);
 	kfree(pr);
 
 	return 0;
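Since shared_cpu_map in the throttling struct is now a cpumask_var_t, the add path has to allocate it and every exit path has to free it, which is what the new "goto free" label above arranges. A hedged sketch of the same pairing in a hypothetical driver (the foo_* names are illustrative, not from this commit):

#include <linux/cpumask.h>
#include <linux/slab.h>

struct foo {
	cpumask_var_t shared_cpu_map;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	if (!alloc_cpumask_var(&f->shared_cpu_map, GFP_KERNEL)) {
		kfree(f);	/* unwind the first allocation on failure */
		return NULL;
	}
	cpumask_clear(f->shared_cpu_map);	/* alloc_cpumask_var() does not zero */
	return f;
}

static void foo_destroy(struct foo *f)
{
	free_cpumask_var(f->shared_cpu_map);
	kfree(f);
}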
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0d7b772bef50..846e227592d4 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance(
 	int count, count_target;
 	int retval = 0;
 	unsigned int i, j;
-	cpumask_t covered_cpus;
+	cpumask_var_t covered_cpus;
 	struct acpi_processor *pr;
 	struct acpi_psd_package *pdomain;
 	struct acpi_processor *match_pr;
 	struct acpi_psd_package *match_pdomain;
 
+	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	mutex_lock(&performance_mutex);
 
 	retval = 0;
@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance(
 		}
 
 		pr->performance = percpu_ptr(performance, i);
-		cpu_set(i, pr->performance->shared_cpu_map);
+		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 		if (acpi_processor_get_psd(pr)) {
 			retval = -EINVAL;
 			continue;
@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance(
 		}
 	}
 
-	cpus_clear(covered_cpus);
+	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
-		if (cpu_isset(i, covered_cpus))
+		if (cpumask_test_cpu(i, covered_cpus))
 			continue;
 
 		pdomain = &(pr->performance->domain_info);
-		cpu_set(i, pr->performance->shared_cpu_map);
-		cpu_set(i, covered_cpus);
+		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
+		cpumask_set_cpu(i, covered_cpus);
 		if (pdomain->num_processors <= 1)
 			continue;
 
@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance(
 				goto err_ret;
 			}
 
-			cpu_set(j, covered_cpus);
-			cpu_set(j, pr->performance->shared_cpu_map);
+			cpumask_set_cpu(j, covered_cpus);
+			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
 			count++;
 		}
 
@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance(
 
 			match_pr->performance->shared_type =
 					pr->performance->shared_type;
-			match_pr->performance->shared_cpu_map =
-				pr->performance->shared_cpu_map;
+			cpumask_copy(match_pr->performance->shared_cpu_map,
+				     pr->performance->shared_cpu_map);
 		}
 	}
 
@@ -731,14 +734,15 @@ err_ret:
 
 		/* Assume no coordination on any error parsing domain info */
 		if (retval) {
-			cpus_clear(pr->performance->shared_cpu_map);
-			cpu_set(i, pr->performance->shared_cpu_map);
+			cpumask_clear(pr->performance->shared_cpu_map);
+			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 		}
 		pr->performance = NULL; /* Will be set for real in register */
 	}
 
 	mutex_unlock(&performance_mutex);
+	free_cpumask_var(covered_cpus);
 	return retval;
 }
 EXPORT_SYMBOL(acpi_processor_preregister_performance);
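The cpumask_copy() change above is not just a rename: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer, so the old struct assignment would alias the two bitmaps rather than copy one into the other. A one-function sketch of the distinction:

#include <linux/cpumask.h>

/* Sketch: dst and src may be heap bitmaps (offstack) or arrays
 * (onstack); cpumask_copy() copies the bits either way, whereas a
 * plain "dst = src" on pointers would only share one bitmap. */
static void share_domain_mask(struct cpumask *dst, const struct cpumask *src)
{
	cpumask_copy(dst, src);
}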
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a0c38c94a8a0..d27838171f4a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
 	int count, count_target;
 	int retval = 0;
 	unsigned int i, j;
-	cpumask_t covered_cpus;
+	cpumask_var_t covered_cpus;
 	struct acpi_processor *pr, *match_pr;
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
+	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Now that we have _TSD data from all CPUs, lets setup T-state
 	 * coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpus_clear(covered_cpus);
+	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
-		if (cpu_isset(i, covered_cpus))
+		if (cpumask_test_cpu(i, covered_cpus))
 			continue;
 		pthrottling = &pr->throttling;
 
 		pdomain = &(pthrottling->domain_info);
-		cpu_set(i, pthrottling->shared_cpu_map);
-		cpu_set(i, covered_cpus);
+		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(i, covered_cpus);
 		/*
 		 * If the number of processor in the TSD domain is 1, it is
 		 * unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
 				goto err_ret;
 			}
 
-			cpu_set(j, covered_cpus);
-			cpu_set(j, pthrottling->shared_cpu_map);
+			cpumask_set_cpu(j, covered_cpus);
+			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
 			count++;
 		}
 		for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
 			 * If some CPUS have the same domain, they
 			 * will have the same shared_cpu_map.
 			 */
-			match_pthrottling->shared_cpu_map =
-				pthrottling->shared_cpu_map;
+			cpumask_copy(match_pthrottling->shared_cpu_map,
+				     pthrottling->shared_cpu_map);
 		}
 	}
 
 err_ret:
+	free_cpumask_var(covered_cpus);
+
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
@@ -182,8 +187,8 @@ err_ret:
 		 */
 		if (retval) {
 			pthrottling = &(pr->throttling);
-			cpus_clear(pthrottling->shared_cpu_map);
-			cpu_set(i, pthrottling->shared_cpu_map);
+			cpumask_clear(pthrottling->shared_cpu_map);
+			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 		}
 	}
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 	pthrottling = &pr->throttling;
 	pthrottling->tsd_valid_flag = 1;
 	pthrottling->shared_type = pdomain->coord_type;
-	cpu_set(pr->id, pthrottling->shared_cpu_map);
+	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 	/*
 	 * If the coordination type is not defined in ACPI spec,
 	 * the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret;
 
 	if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 
 	if (!pr->flags.throttling)
 		return -ENODEV;
+
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Migrate task to the cpu pointed by pr.
 	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+	cpumask_copy(saved_mask, &current->cpus_allowed);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(saved_mask);
 
 	return ret;
 }
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
 	struct throttling_tstate t_state;
-	cpumask_t online_throttling_cpus;
+	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	saved_mask = current->cpus_allowed;
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+		free_cpumask_var(saved_mask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpus_and(online_throttling_cpus, cpu_online_map,
-		 p_throttling->shared_cpu_map);
+	cpumask_and(online_throttling_cpus, cpu_online_mask,
+		    p_throttling->shared_cpu_map);
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+		/* FIXME: use work_on_cpu() */
+		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 						t_state.target_state);
 	} else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask_nr(i, online_throttling_cpus) {
+		for_each_cpu(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+			/* FIXME: use work_on_cpu() */
+			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
 	}
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(online_throttling_cpus);
+	free_cpumask_var(saved_mask);
 	return ret;
 }
 
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 	if (acpi_processor_get_tsd(pr)) {
 		pthrottling = &pr->throttling;
 		pthrottling->tsd_valid_flag = 0;
-		cpu_set(pr->id, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 	}
 
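The get/set throttling paths still reach per-CPU state by temporarily rebinding current, which is why saved_mask now has to be allocated before current->cpus_allowed can be copied into it. The new FIXME comments point at work_on_cpu() as the cleaner replacement; a hedged sketch of that alternative (the throttle_fn body is illustrative, not the ACPI code):

#include <linux/workqueue.h>

static long throttle_fn(void *arg)
{
	/* runs on the CPU passed to work_on_cpu(), caller blocked */
	return 0;	/* e.g. touch the per-CPU throttling registers */
}

static long throttle_on_cpu(int cpu)
{
	/* no saved_mask dance: the workqueue worker is already bound */
	return work_on_cpu(cpu, throttle_fn, NULL);
}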
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4259072f5bd0..719ee5c1c8d9 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -128,10 +128,54 @@ print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
+/*
+ * Print values for NR_CPUS and offlined cpus
+ */
+static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf)
+{
+	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
+	return n;
+}
+static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+
+/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
+unsigned int total_cpus;
+
+static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf)
+{
+	int n = 0, len = PAGE_SIZE-2;
+	cpumask_var_t offline;
+
+	/* display offline cpus < nr_cpu_ids */
+	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_complement(offline, cpu_online_mask);
+	n = cpulist_scnprintf(buf, len, offline);
+	free_cpumask_var(offline);
+
+	/* display offline cpus >= nr_cpu_ids */
+	if (total_cpus && nr_cpu_ids < total_cpus) {
+		if (n && n < len)
+			buf[n++] = ',';
+
+		if (nr_cpu_ids == total_cpus-1)
+			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+		else
+			n += snprintf(&buf[n], len - n, "%d-%d",
+						nr_cpu_ids, total_cpus-1);
+	}
+
+	n += snprintf(&buf[n], len - n, "\n");
+	return n;
+}
+static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+
 static struct sysdev_class_attribute *cpu_state_attr[] = {
 	&attr_online_map,
 	&attr_possible_map,
 	&attr_present_map,
+	&attr_kernel_max,
+	&attr_offline,
 };
 
 static int cpu_states_init(void)
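The two new attributes surface as read-only files under /sys/devices/system/cpu/. Illustrative output only; the values depend on the machine and on CONFIG_NR_CPUS, and the cpu numbers below are made up:

$ cat /sys/devices/system/cpu/kernel_max
127
$ cat /sys/devices/system/cpu/offline
4-7,64-127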
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 757035ea246f..3128a5090dbd 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -659,12 +659,12 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 
 	WARN_ON_ONCE(!in_interrupt());
 	if (ehca_debug_level >= 3)
-		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
+		ehca_dmp(cpu_online_mask, cpumask_size(), "");
 
 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
-	cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+	cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
 	if (cpu >= nr_cpu_ids)
-		cpu = first_cpu(cpu_online_map);
+		cpu = cpumask_first(cpu_online_mask);
 	pool->last_cpu = cpu;
 	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
 
@@ -855,7 +855,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
 	case CPU_UP_CANCELED_FROZEN:
 		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
 		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-		kthread_bind(cct->task, any_online_cpu(cpu_online_map));
+		kthread_bind(cct->task, cpumask_any(cpu_online_mask));
 		destroy_comp_task(pool, cpu);
 		break;
 	case CPU_ONLINE:
@@ -902,7 +902,7 @@ int ehca_create_comp_pool(void)
 		return -ENOMEM;
 
 	spin_lock_init(&pool->last_cpu_lock);
-	pool->last_cpu = any_online_cpu(cpu_online_map);
+	pool->last_cpu = cpumask_any(cpu_online_mask);
 
 	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
 	if (pool->cpu_comp_tasks == NULL) {
@@ -934,10 +934,9 @@ void ehca_destroy_comp_pool(void)
 
 	unregister_hotcpu_notifier(&comp_pool_callback_nb);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			destroy_comp_task(pool, i);
-	}
+	for_each_online_cpu(i)
+		destroy_comp_task(pool, i);
+
 	free_percpu(pool->cpu_comp_tasks);
 	kfree(pool);
 }
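find_next_online_cpu() keeps its round-robin behaviour across the conversion: cpumask_next() returns nr_cpu_ids when it runs off the end of the mask, and the code wraps back to cpumask_first(). A condensed sketch of the idiom, outside the driver's locking:

#include <linux/cpumask.h>

/* Sketch of the wrap-around scan used above: returns the online CPU
 * after "last", wrapping to the first online CPU at the end. */
static int next_online_cpu_wrapped(int last)
{
	int cpu = cpumask_next(last, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	return cpu;
}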
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 239d4e8068ac..23173982b32c 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1679,7 +1679,7 @@ static int find_best_unit(struct file *fp,
 	 * InfiniPath chip to that processor (we assume reasonable connectivity,
 	 * for now). This code assumes that if affinity has been set
 	 * before this point, that at most one cpu is set; for now this
-	 * is reasonable. I check for both cpus_empty() and cpus_full(),
+	 * is reasonable. I check for both cpumask_empty() and cpumask_full(),
 	 * in case some kernel variant sets none of the bits when no
 	 * affinity is set. 2.6.11 and 12 kernels have all present
 	 * cpus set. Some day we'll have to fix it up further to handle
@@ -1688,11 +1688,11 @@ static int find_best_unit(struct file *fp,
 	 * information. There may be some issues with dual core numbering
 	 * as well. This needs more work prior to release.
 	 */
-	if (!cpus_empty(current->cpus_allowed) &&
-	    !cpus_full(current->cpus_allowed)) {
+	if (!cpumask_empty(&current->cpus_allowed) &&
+	    !cpumask_full(&current->cpus_allowed)) {
 		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
 		for (i = 0; i < ncpus; i++)
-			if (cpu_isset(i, current->cpus_allowed)) {
+			if (cpumask_test_cpu(i, &current->cpus_allowed)) {
 				ipath_cdbg(PROC, "%s[%u] affinity set for "
 					   "cpu %d/%d\n", current->comm,
 					   current->pid, i, ncpus);
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 7ff824496b39..7e6b5a3b3281 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -481,7 +481,7 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
 
 	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
 	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_possible_cpu(i) {
 		struct desc_struct *gdt = get_cpu_gdt_table(i);
 		if (!gdt)
 			continue;
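The bioscalls.c change is the same theme in miniature: looping 0..NR_CPUS-1 visits CPU numbers that may have no backing per-CPU data at all, while for_each_possible_cpu() stays within nr_cpu_ids and only visits CPUs the platform can ever bring up. A sketch of the preferred form (do_something() is a placeholder):

#include <linux/cpumask.h>

static void visit_cpus(void (*do_something)(int cpu))
{
	int cpu;

	/* old style was "for (cpu = 0; cpu < NR_CPUS; cpu++)", which
	 * iterates to the compile-time maximum regardless of topology */
	for_each_possible_cpu(cpu)
		do_something(cpu);
}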