-rw-r--r--  drivers/acpi/processor_core.c       |  6
-rw-r--r--  drivers/acpi/processor_thermal.c    |  6
-rw-r--r--  drivers/acpi/processor_throttling.c | 30
-rw-r--r--  drivers/leds/ledtrig-gpio.c         | 24
-rw-r--r--  drivers/platform/x86/wmi.c          |  8
-rw-r--r--  drivers/pps/pps.c                   |  2
-rw-r--r--  drivers/thermal/thermal_sys.c       |  9
-rw-r--r--  include/acpi/processor.h            |  5
-rw-r--r--  include/linux/flex_array.h          | 12
-rw-r--r--  kernel/fork.c                       | 20
-rw-r--r--  lib/flex_array.c                    | 41
-rw-r--r--  mm/rmap.c                           |  1
-rw-r--r--  mm/vmscan.c                         |  9
13 files changed, 99 insertions, 74 deletions
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 84e0f3c07442..2cc4b3033872 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void)
 {
 	int result = 0;
 
+	if (acpi_disabled)
+		return 0;
+
 	memset(&errata, 0, sizeof(errata));
 
 #ifdef CONFIG_SMP
@@ -1197,6 +1200,9 @@ out_proc:
 
 static void __exit acpi_processor_exit(void)
 {
+	if (acpi_disabled)
+		return;
+
 	acpi_processor_ppc_exit();
 
 	acpi_thermal_cpufreq_exit();
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 39838c666032..31adda1099e0 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
 		if (pr->limit.thermal.tx > tx)
 			tx = pr->limit.thermal.tx;
 
-		result = acpi_processor_set_throttling(pr, tx);
+		result = acpi_processor_set_throttling(pr, tx, false);
 		if (result)
 			goto end;
 	}
@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
 
 	if (state <= max_pstate) {
 		if (pr->flags.throttling && pr->throttling.state)
-			result = acpi_processor_set_throttling(pr, 0);
+			result = acpi_processor_set_throttling(pr, 0, false);
 		cpufreq_set_cur_state(pr->id, state);
 	} else {
 		cpufreq_set_cur_state(pr->id, max_pstate);
 		result = acpi_processor_set_throttling(pr,
-				state - max_pstate);
+				state - max_pstate, false);
 	}
 	return result;
 }
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 227543789ba9..ae39797aab55 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,7 +62,8 @@ struct throttling_tstate {
 #define THROTTLING_POSTCHANGE      (2)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+		int state, bool force);
 
 static int acpi_processor_update_tsd_coord(void)
 {
@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
 		 */
 		target_state = throttling_limit;
 	}
-	return acpi_processor_set_throttling(pr, target_state);
+	return acpi_processor_set_throttling(pr, target_state, false);
 }
 
 /*
@@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
 		if (state == -1) {
-			ACPI_WARNING((AE_INFO,
-				"Invalid throttling state, reset"));
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"Invalid throttling state, reset\n"));
 			state = 0;
-			ret = acpi_processor_set_throttling(pr, state);
+			ret = acpi_processor_set_throttling(pr, state, true);
 			if (ret)
 				return ret;
 		}
@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
 }
 
 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
-					      int state)
+					      int state, bool force)
 {
 	u32 value = 0;
 	u32 duty_mask = 0;
@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (state == pr->throttling.state)
+	if (!force && (state == pr->throttling.state))
 		return 0;
 
 	if (state < pr->throttling_platform_limit)
@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 }
 
 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
-					     int state)
+					     int state, bool force)
 {
 	int ret;
 	acpi_integer value;
@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (state == pr->throttling.state)
+	if (!force && (state == pr->throttling.state))
 		return 0;
 
 	if (state < pr->throttling_platform_limit)
@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+		int state, bool force)
 {
 	cpumask_var_t saved_mask;
 	int ret = 0;
@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		/* FIXME: use work_on_cpu() */
 		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
-						t_state.target_state);
+						t_state.target_state, force);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
-				match_pr, t_state.target_state);
+				match_pr, t_state.target_state, force);
 		}
 	}
 	/*
@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Disabling throttling (was T%d)\n",
 			pr->throttling.state));
-		result = acpi_processor_set_throttling(pr, 0);
+		result = acpi_processor_set_throttling(pr, 0, false);
 		if (result)
 			goto end;
 	}
@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
 	if (strcmp(tmpbuf, charp) != 0)
 		return -EINVAL;
 
-	result = acpi_processor_set_throttling(pr, state_val);
+	result = acpi_processor_set_throttling(pr, state_val, false);
 	if (result)
 		return result;
 
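
The reason for the new bool force argument: the cached pr->throttling.state can disagree with what the hardware reports, and when acpi_processor_get_throttling_ptc() reads back an invalid T-state it now resets to T0 with force = true so the write is not short-circuited by the state == pr->throttling.state check. All other callers pass false and keep the old behaviour. Below is a minimal userspace model of that cached-state/force pattern; the names and the main() driver are illustrative, not kernel code.

/* Illustrative model of the "force" pattern, not the ACPI driver itself. */
#include <stdbool.h>
#include <stdio.h>

struct dev {
	int cached_state;	/* what software last wrote */
	int hw_state;		/* what the hardware is actually doing */
};

static int set_state(struct dev *d, int state, bool force)
{
	if (!force && state == d->cached_state)
		return 0;	/* short-circuit: looks like nothing to do */
	d->hw_state = state;	/* the real driver would program the T-state here */
	d->cached_state = state;
	return 0;
}

int main(void)
{
	/* software believes T0, but firmware left the CPU throttled at T3 */
	struct dev d = { .cached_state = 0, .hw_state = 3 };

	set_state(&d, 0, false);
	printf("without force: hw=%d\n", d.hw_state);	/* still 3 */
	set_state(&d, 0, true);
	printf("with force:    hw=%d\n", d.hw_state);	/* now 0 */
	return 0;
}
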
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index a247ae63374f..1bc5db4ece0d 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
 
 	gpio_data->inverted = !!inverted;
 
+	/* After inverting, we need to update the LED. */
+	schedule_work(&gpio_data->work);
+
 	return n;
 }
 static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
@@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
 		return -EINVAL;
 	}
 
+	if (gpio_data->gpio == gpio)
+		return n;
+
 	if (!gpio) {
-		free_irq(gpio_to_irq(gpio_data->gpio), led);
+		if (gpio_data->gpio != 0)
+			free_irq(gpio_to_irq(gpio_data->gpio), led);
+		gpio_data->gpio = 0;
 		return n;
 	}
 
-	if (gpio_data->gpio > 0 && gpio_data->gpio != gpio)
-		free_irq(gpio_to_irq(gpio_data->gpio), led);
-
-	gpio_data->gpio = gpio;
 	ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
 			IRQF_SHARED | IRQF_TRIGGER_RISING
 			| IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
-	if (ret)
+	if (ret) {
 		dev_err(dev, "request_irq failed with error %d\n", ret);
+	} else {
+		if (gpio_data->gpio != 0)
+			free_irq(gpio_to_irq(gpio_data->gpio), led);
+		gpio_data->gpio = gpio;
+	}
 
 	return ret ? ret : n;
 }
@@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led)
 		device_remove_file(led->dev, &dev_attr_inverted);
 		device_remove_file(led->dev, &dev_attr_desired_brightness);
 		flush_work(&gpio_data->work);
-		free_irq(gpio_to_irq(gpio_data->gpio),led);
+		if (gpio_data->gpio != 0)
+			free_irq(gpio_to_irq(gpio_data->gpio), led);
 		kfree(gpio_data);
 	}
 }
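
The gpio_trig_gpio_store() rework above follows a common swap ordering: request the new IRQ first, and only when that succeeds free the previously requested one and commit the new number, so a failed write leaves the trigger in its old working state and free_irq() is never called for the "no gpio" value 0. A standalone sketch of that ordering, with made-up acquire()/release() helpers standing in for request_irq()/free_irq():

/* Illustrative model of the swap ordering above -- not the driver code. */
#include <stdio.h>

static int acquire(int id)		/* stand-in for request_irq() */
{
	return id == 13 ? -1 : 0;	/* pretend id 13 cannot be acquired */
}

static void release(int id)		/* stand-in for free_irq() */
{
	printf("released %d\n", id);
}

static int swap_resource(int *current_id, int new_id)
{
	if (*current_id == new_id)
		return 0;		/* nothing to do */
	if (acquire(new_id))
		return -1;		/* old resource stays valid */
	if (*current_id != 0)
		release(*current_id);	/* never release the "none" handle */
	*current_id = new_id;
	return 0;
}

int main(void)
{
	int gpio = 0;

	swap_resource(&gpio, 7);	/* 0 -> 7, nothing freed */
	swap_resource(&gpio, 13);	/* fails, gpio stays 7 */
	swap_resource(&gpio, 9);	/* 7 freed only after 9 is acquired */
	printf("current: %d\n", gpio);
	return 0;
}
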
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 043b208d971d..f215a5919192 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
 	acpi_status status;
 	struct acpi_object_list input;
 	union acpi_object params[3];
-	char method[4] = "WM";
+	char method[5] = "WM";
 
 	if (!find_guid(guid_string, &wblock))
 		return AE_ERROR;
@@ -328,8 +328,8 @@ struct acpi_buffer *out)
 	acpi_status status, wc_status = AE_ERROR;
 	struct acpi_object_list input, wc_input;
 	union acpi_object wc_params[1], wq_params[1];
-	char method[4];
-	char wc_method[4] = "WC";
+	char method[5];
+	char wc_method[5] = "WC";
 
 	if (!guid_string || !out)
 		return AE_BAD_PARAMETER;
@@ -410,7 +410,7 @@ const struct acpi_buffer *in)
 	acpi_handle handle;
 	struct acpi_object_list input;
 	union acpi_object params[2];
-	char method[4] = "WS";
+	char method[5] = "WS";
 
 	if (!guid_string || !in)
 		return AE_BAD_DATA;
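
The three buffers grow from 4 to 5 bytes because the ACPI method name is a two-character prefix ("WM", "WC", "WS") followed by a two-character WMI object ID and a terminating NUL, i.e. five bytes in total; with char method[4] the append of the object ID has no room left for the NUL. A standalone illustration of the arithmetic (the object ID value here is made up):

/* Why the WMI method buffers grew from 4 to 5 bytes.  Illustrative only. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char method[5] = "WM";		/* a 4-byte buffer would overflow below */
	const char object_id[] = "AA";	/* two-character WMI object ID */

	strncat(method, object_id, 2);	/* appends "AA" plus the NUL */
	printf("%s needs %zu bytes\n", method, strlen(method) + 1);	/* 5 */
	return 0;
}
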
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ac8cc8cea1e3..fea17e7805e9 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps)
 	}
 	pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
 			"pps%d", pps->id);
-	if (err)
+	if (IS_ERR(pps->dev))
 		goto del_cdev;
 	dev_set_drvdata(pps->dev, pps);
 
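
device_create() reports failure through an ERR_PTR-encoded pointer rather than through the earlier err variable, so the old if (err) test checked a stale value and could never catch the failure; IS_ERR(pps->dev) does. A simplified userspace model of the ERR_PTR convention follows; these are not the kernel's actual macro definitions, just the idea.

/* Simplified model of ERR_PTR/IS_ERR -- a failing constructor returns a
 * pointer with a small negative errno encoded in it instead of NULL. */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_device(int fail)
{
	static int dummy;
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &dummy;
}

int main(void)
{
	void *dev = create_device(1);

	if (IS_ERR(dev))	/* a plain "if (!dev)" would miss this */
		printf("create failed: %ld\n", PTR_ERR(dev));
	return 0;
}
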
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0a69672097a8..4e83c297ec9e 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
 
 	mutex_lock(&tz->lock);
 
-	tz->ops->get_temp(tz, &temp);
+	if (tz->ops->get_temp(tz, &temp)) {
+		/* get_temp failed - retry it later */
+		printk(KERN_WARNING PREFIX "failed to read out thermal zone "
+		       "%d\n", tz->id);
+		goto leave;
+	}
 
 	for (count = 0; count < tz->trips; count++) {
 		tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
 						THERMAL_TRIPS_NONE);
 
 	tz->last_temperature = temp;
+
+leave:
 	if (tz->passive)
 		thermal_zone_device_set_polling(tz, tz->passive_delay);
 	else if (tz->polling_delay)
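
With this change a failing get_temp() callback is treated as "no reading this cycle": the core warns, skips trip-point evaluation, and still falls through to re-arm polling, so a transient sensor error gets retried instead of being acted on with an uninitialized temperature. A small standalone model of that control flow (the zone struct and callback here are illustrative):

/* Illustrative model of "skip this cycle, keep polling" -- not the thermal core. */
#include <stdio.h>

struct zone {
	int (*get_temp)(struct zone *z, long *temp);
	long last_temperature;
};

static void zone_update(struct zone *z)
{
	long temp;

	if (z->get_temp(z, &temp)) {
		printf("sensor read failed - retry next poll\n");
		goto leave;		/* no trip handling on a failed read */
	}
	printf("handling trips at %ld\n", temp);
	z->last_temperature = temp;
leave:
	printf("re-arming polling timer\n");
}

static int flaky_get_temp(struct zone *z, long *temp)
{
	static int calls;

	if (calls++ == 0)
		return -1;		/* first read fails */
	*temp = 45000;			/* millidegrees, as the sysfs ABI uses */
	return 0;
}

int main(void)
{
	struct zone z = { .get_temp = flaky_get_temp };

	zone_update(&z);	/* failed read: warns, still re-arms */
	zone_update(&z);	/* good read: trips handled */
	return 0;
}
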
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index baf1e0a9a7ee..740ac3ad8fd0 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
 	cpumask_var_t shared_cpu_map;
 	int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
 	int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
-					      int state);
+					      int state, bool force);
 
 	u32 address;
 	u8 duty_offset;
@@ -321,7 +321,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
 /* in processor_throttling.c */
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
-extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+extern int acpi_processor_set_throttling(struct acpi_processor *pr,
+					 int state, bool force);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index 23c1ec79a31b..45ff18491514 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -21,7 +21,7 @@ struct flex_array {
 		struct {
 			int element_size;
 			int total_nr_elements;
-			struct flex_array_part *parts[0];
+			struct flex_array_part *parts[];
 		};
 		/*
 		 * This little trick makes sure that
@@ -36,12 +36,14 @@ struct flex_array {
 	.total_nr_elements = (total),	\
 } } }
 
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags);
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags);
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+		gfp_t flags);
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+		unsigned int end, gfp_t flags);
 void flex_array_free(struct flex_array *fa);
 void flex_array_free_parts(struct flex_array *fa);
-int flex_array_put(struct flex_array *fa, int element_nr, void *src,
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
 		gfp_t flags);
-void *flex_array_get(struct flex_array *fa, int element_nr);
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
 
 #endif /* _FLEX_ARRAY_H */
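
The prototypes above are the whole public flex_array API after the signedness change. A sketch of how in-kernel code is expected to drive it, based only on those prototypes (kernel-context code, so it builds only in-tree; the element type, count, and index are made up):

/* Sketch of intended flex_array usage, based only on the prototypes above. */
#include <linux/errno.h>
#include <linux/flex_array.h>
#include <linux/slab.h>

static int flex_array_example(void)
{
	struct flex_array *fa;
	unsigned int i;
	int err = 0;

	fa = flex_array_alloc(sizeof(int), 1000, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	for (i = 0; i < 1000; i++) {
		int val = i * 2;

		/* may allocate a second-level part, hence the gfp flags */
		err = flex_array_put(fa, i, &val, GFP_KERNEL);
		if (err)
			goto out;
	}

	/* returns NULL for an out-of-range index or an unallocated part */
	if (!flex_array_get(fa, 500))
		err = -EINVAL;
out:
	flex_array_free(fa);
	return err;
}
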
diff --git a/kernel/fork.c b/kernel/fork.c
index 144326b7af50..e6c04d462ab2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -815,11 +815,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
-		atomic_inc(&current->signal->live);
+	if (clone_flags & CLONE_THREAD)
 		return 0;
-	}
 
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -877,16 +874,6 @@ void __cleanup_signal(struct signal_struct *sig)
 	kmem_cache_free(signal_cachep, sig);
 }
 
-static void cleanup_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-
-	atomic_dec(&sig->live);
-
-	if (atomic_dec_and_test(&sig->count))
-		__cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1239,6 +1226,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1282,7 +1271,8 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	cleanup_signal(p);
+	if (!(clone_flags & CLONE_THREAD))
+		__cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 08f1636d296a..7baed2fc3bc8 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure. Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+					gfp_t flags)
 {
 	struct flex_array *ret;
 	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
@@ -115,16 +116,14 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
 	return ret;
 }
 
-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+					unsigned int element_nr)
 {
 	return element_nr / __elements_per_part(fa->element_size);
 }
 
 /**
  * flex_array_free_parts - just free the second-level pages
- * @src:	address of data to copy into the array
- * @element_nr:	index of the position in which to insert
- *		the new element.
  *
  * This is to be used in cases where the base 'struct flex_array'
 * has been statically allocated and should not be free.
@@ -146,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
 	kfree(fa);
 }
 
-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+					unsigned int element_nr)
 {
-	return element_nr % __elements_per_part(fa->element_size);
-}
+	unsigned int part_offset;
 
-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-	int part_offset = fa_index_inside_part(fa, element_nr);
+	part_offset = element_nr % __elements_per_part(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -188,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+		gfp_t flags)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
@@ -198,10 +196,11 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
 		return -ENOSPC;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = __fa_get_part(fa, part_nr, flags);
-	if (!part)
-		return -ENOMEM;
+		if (!part)
+			return -ENOMEM;
+	}
 	dst = &part->elements[index_inside_part(fa, element_nr)];
 	memcpy(dst, src, fa->element_size);
 	return 0;
@@ -219,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
  *
 * Locking must be provided by the caller.
 */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+			unsigned int end, gfp_t flags)
 {
 	int start_part;
 	int end_part;
@@ -250,18 +250,19 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
  *
 * Locking must be provided by the caller.
 */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
 	int part_nr = fa_element_to_part_nr(fa, element_nr);
 	struct flex_array_part *part;
 
 	if (element_nr >= fa->total_nr_elements)
 		return NULL;
-	if (!fa->parts[part_nr])
-		return NULL;
 	if (elements_fit_in_base(fa))
 		part = (struct flex_array_part *)&fa->parts[0];
-	else
+	else {
 		part = fa->parts[part_nr];
+		if (!part)
+			return NULL;
+	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -358,6 +358,7 @@ static int page_referenced_one(struct page *page,
 	 */
 	if (vma->vm_flags & VM_LOCKED) {
 		*mapcount = 1;	/* break early from loop */
+		*vm_flags |= VM_LOCKED;
 		goto out_unmap;
 	}
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dea7abd31098..94e86dd6954c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -630,9 +630,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 		referenced = page_referenced(page, 1,
 						sc->mem_cgroup, &vm_flags);
-		/* In active use or really unfreeable? Activate it. */
+		/*
+		 * In active use or really unfreeable? Activate it.
+		 * If page which have PG_mlocked lost isoltation race,
+		 * try_to_unmap moves it to unevictable list
+		 */
 		if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-					referenced && page_mapping_inuse(page))
+					referenced && page_mapping_inuse(page)
+					&& !(vm_flags & VM_LOCKED))
 			goto activate_locked;
 
638 | /* | 643 | /* |