Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/power/disk.c     |  1
 -rw-r--r--  kernel/power/snapshot.c |  9
 -rw-r--r--  kernel/power/swsusp.c   | 18
 -rw-r--r--  kernel/sched.c          | 23
 -rw-r--r--  kernel/sysctl.c         |  2
 5 files changed, 33 insertions, 20 deletions
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index e886d1332a10..f3db382c2b2d 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
+#include <asm/suspend.h>
 
 #include "power.h"
 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f5fc2d7680f2..33e2e4a819f9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 
         INIT_LIST_HEAD(list);
 
-        for_each_zone(zone) {
+        for_each_populated_zone(zone) {
                 unsigned long zone_start, zone_end;
                 struct mem_extent *ext, *cur, *aux;
 
-                if (!populated_zone(zone))
-                        continue;
-
                 zone_start = zone->zone_start_pfn;
                 zone_end = zone->zone_start_pfn + zone->spanned_pages;
 
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void)
         struct zone *zone;
         unsigned int cnt = 0;
 
-        for_each_zone(zone)
-                if (populated_zone(zone) && is_highmem(zone))
+        for_each_populated_zone(zone)
+                if (is_highmem(zone))
                         cnt += zone_page_state(zone, NR_FREE_PAGES);
 
         return cnt;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index a92c91451559..78c35047586d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -51,6 +51,7 @@
 #include <linux/highmem.h>
 #include <linux/time.h>
 #include <linux/rbtree.h>
+#include <linux/io.h>
 
 #include "power.h"
 
@@ -229,17 +230,16 @@ int swsusp_shrink_memory(void)
                 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
                 tmp = size;
                 size += highmem_size;
-                for_each_zone (zone)
-                        if (populated_zone(zone)) {
-                                tmp += snapshot_additional_pages(zone);
-                                if (is_highmem(zone)) {
-                                        highmem_size -=
+                for_each_populated_zone(zone) {
+                        tmp += snapshot_additional_pages(zone);
+                        if (is_highmem(zone)) {
+                                highmem_size -=
                                         zone_page_state(zone, NR_FREE_PAGES);
-                                } else {
-                                        tmp -= zone_page_state(zone, NR_FREE_PAGES);
-                                        tmp += zone->lowmem_reserve[ZONE_NORMAL];
-                                }
+                        } else {
+                                tmp -= zone_page_state(zone, NR_FREE_PAGES);
+                                tmp += zone->lowmem_reserve[ZONE_NORMAL];
                         }
+                }
 
                 if (highmem_size < 0)
                         highmem_size = 0;
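
The conversions in snapshot.c above and in swsusp_shrink_memory() here are mechanical: the open-coded populated_zone() test inside a for_each_zone() loop is folded into the new for_each_populated_zone() iterator, which simply skips zones that contain no pages. As a rough sketch (not a verbatim quote of <linux/mmzone.h>), the helper can be thought of as:

/* Illustrative only: visit every zone, but let the loop body run only for
 * zones that populated_zone() reports as non-empty; the dangling-else trick
 * keeps the macro composable with an ordinary loop body. */
#define for_each_populated_zone(zone)                   \
        for_each_zone(zone)                             \
                if (!populated_zone(zone))              \
                        ; /* skip empty zones */        \
                else

With that helper, a caller's "for_each_zone(zone) { if (!populated_zone(zone)) continue; ... }" collapses to "for_each_populated_zone(zone) { ... }", which is exactly the shape of the hunks above.
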
diff --git a/kernel/sched.c b/kernel/sched.c
index 196d48babbef..73513f4e19df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5196,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
         __wake_up_common(q, mode, 1, 0, NULL);
 }
 
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+        __wake_up_common(q, mode, 1, 0, key);
+}
+
 /**
- * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
  *
  * The sync wakeup differs that the waker knows that it will schedule
  * away soon, so while the target thread will be woken up, it will not
@@ -5209,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void
-__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+                        int nr_exclusive, void *key)
 {
         unsigned long flags;
         int sync = 1;
@@ -5222,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
                 sync = 0;
 
         spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+        __wake_up_common(q, mode, nr_exclusive, sync, key);
         spin_unlock_irqrestore(&q->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
 
 /**
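
The sched.c change adds key-passing variants of the wake-up helpers: __wake_up_locked_key() and __wake_up_sync_key() hand an opaque pointer through to each wait-queue entry's wake function, while __wake_up_sync() becomes a thin wrapper that forwards a NULL key. A minimal, hypothetical caller (not part of this diff; all demo_* names below are invented for illustration) that filters wakeups on the key might look like:

/* Hypothetical illustration only: a key-aware wake callback plus a waker
 * using the new __wake_up_sync_key(). */
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

/* Wake callback installed by a waiter: wake only when the key carries the
 * POLLIN bit, otherwise stay asleep and let other events pass by. */
static int demo_pollin_wake(wait_queue_t *curr, unsigned mode, int sync,
                            void *key)
{
        if (key && !((unsigned long)key & POLLIN))
                return 0;
        return autoremove_wake_function(curr, mode, sync, key);
}

/* Waker side: pass the event mask through as the opaque key. */
static void demo_notify_readable(void)
{
        __wake_up_sync_key(&demo_wq, TASK_INTERRUPTIBLE, 1,
                           (void *)(unsigned long)POLLIN);
}

A waiter would install demo_pollin_wake() on its wait-queue entry with init_waitqueue_func_entry() before adding itself to demo_wq; waiters using the default wake function simply ignore the key.
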
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c5ef44ff850f..2e490a389dd2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1010,7 +1010,7 @@ static struct ctl_table vm_table[] = {
                 .data           = &dirty_expire_interval,
                 .maxlen         = sizeof(dirty_expire_interval),
                 .mode           = 0644,
-                .proc_handler   = &proc_dointvec_userhz_jiffies,
+                .proc_handler   = &proc_dointvec,
         },
         {
                 .ctl_name       = VM_NR_PDFLUSH_THREADS,
