diff options
Diffstat (limited to 'mm')
 -rw-r--r--  mm/backing-dev.c    | 7
 -rw-r--r--  mm/filemap.c        | 1
 -rw-r--r--  mm/memcontrol.c     | 2
 -rw-r--r--  mm/page-writeback.c | 8
 -rw-r--r--  mm/page_alloc.c     | 6
 -rw-r--r--  mm/slab.c           | 8
 -rw-r--r--  mm/slob.c           | 2
 -rw-r--r--  mm/slub.c           | 2
 -rw-r--r--  mm/vmscan.c         | 8
9 files changed, 24 insertions, 20 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 493b468a5035..c86edd244294 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
| @@ -283,7 +283,6 @@ static wait_queue_head_t congestion_wqh[2] = { | |||
| 283 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) | 283 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) |
| 284 | }; | 284 | }; |
| 285 | 285 | ||
| 286 | |||
| 287 | void clear_bdi_congested(struct backing_dev_info *bdi, int sync) | 286 | void clear_bdi_congested(struct backing_dev_info *bdi, int sync) |
| 288 | { | 287 | { |
| 289 | enum bdi_state bit; | 288 | enum bdi_state bit; |
| @@ -308,18 +307,18 @@ EXPORT_SYMBOL(set_bdi_congested); | |||
| 308 | 307 | ||
| 309 | /** | 308 | /** |
| 310 | * congestion_wait - wait for a backing_dev to become uncongested | 309 | * congestion_wait - wait for a backing_dev to become uncongested |
| 311 | * @rw: READ or WRITE | 310 | * @sync: SYNC or ASYNC IO |
| 312 | * @timeout: timeout in jiffies | 311 | * @timeout: timeout in jiffies |
| 313 | * | 312 | * |
| 314 | * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit | 313 | * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit |
| 315 | * write congestion. If no backing_devs are congested then just wait for the | 314 | * write congestion. If no backing_devs are congested then just wait for the |
| 316 | * next write to be completed. | 315 | * next write to be completed. |
| 317 | */ | 316 | */ |
| 318 | long congestion_wait(int rw, long timeout) | 317 | long congestion_wait(int sync, long timeout) |
| 319 | { | 318 | { |
| 320 | long ret; | 319 | long ret; |
| 321 | DEFINE_WAIT(wait); | 320 | DEFINE_WAIT(wait); |
| 322 | wait_queue_head_t *wqh = &congestion_wqh[rw]; | 321 | wait_queue_head_t *wqh = &congestion_wqh[sync]; |
| 323 | 322 | ||
| 324 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); | 323 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); |
| 325 | ret = io_schedule_timeout(timeout); | 324 | ret = io_schedule_timeout(timeout); |
diff --git a/mm/filemap.c b/mm/filemap.c index 22396713feb9..ccea3b665c12 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -2272,6 +2272,7 @@ again: | |||
| 2272 | pagefault_enable(); | 2272 | pagefault_enable(); |
| 2273 | flush_dcache_page(page); | 2273 | flush_dcache_page(page); |
| 2274 | 2274 | ||
| 2275 | mark_page_accessed(page); | ||
| 2275 | status = a_ops->write_end(file, mapping, pos, bytes, copied, | 2276 | status = a_ops->write_end(file, mapping, pos, bytes, copied, |
| 2276 | page, fsdata); | 2277 | page, fsdata); |
| 2277 | if (unlikely(status < 0)) | 2278 | if (unlikely(status < 0)) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e2fa20dadf40..e717964cb5a0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -1973,7 +1973,7 @@ try_to_free: | |||
| 1973 | if (!progress) { | 1973 | if (!progress) { |
| 1974 | nr_retries--; | 1974 | nr_retries--; |
| 1975 | /* maybe some writeback is necessary */ | 1975 | /* maybe some writeback is necessary */ |
| 1976 | congestion_wait(WRITE, HZ/10); | 1976 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 1977 | } | 1977 | } |
| 1978 | 1978 | ||
| 1979 | } | 1979 | } |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 7687879253b9..81627ebcd313 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -575,7 +575,7 @@ static void balance_dirty_pages(struct address_space *mapping) | |||
| 575 | if (pages_written >= write_chunk) | 575 | if (pages_written >= write_chunk) |
| 576 | break; /* We've done our duty */ | 576 | break; /* We've done our duty */ |
| 577 | 577 | ||
| 578 | congestion_wait(WRITE, HZ/10); | 578 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 579 | } | 579 | } |
| 580 | 580 | ||
| 581 | if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh && | 581 | if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh && |
| @@ -669,7 +669,7 @@ void throttle_vm_writeout(gfp_t gfp_mask) | |||
| 669 | if (global_page_state(NR_UNSTABLE_NFS) + | 669 | if (global_page_state(NR_UNSTABLE_NFS) + |
| 670 | global_page_state(NR_WRITEBACK) <= dirty_thresh) | 670 | global_page_state(NR_WRITEBACK) <= dirty_thresh) |
| 671 | break; | 671 | break; |
| 672 | congestion_wait(WRITE, HZ/10); | 672 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 673 | 673 | ||
| 674 | /* | 674 | /* |
| 675 | * The caller might hold locks which can prevent IO completion | 675 | * The caller might hold locks which can prevent IO completion |
| @@ -715,7 +715,7 @@ static void background_writeout(unsigned long _min_pages) | |||
| 715 | if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { | 715 | if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { |
| 716 | /* Wrote less than expected */ | 716 | /* Wrote less than expected */ |
| 717 | if (wbc.encountered_congestion || wbc.more_io) | 717 | if (wbc.encountered_congestion || wbc.more_io) |
| 718 | congestion_wait(WRITE, HZ/10); | 718 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 719 | else | 719 | else |
| 720 | break; | 720 | break; |
| 721 | } | 721 | } |
| @@ -787,7 +787,7 @@ static void wb_kupdate(unsigned long arg) | |||
| 787 | writeback_inodes(&wbc); | 787 | writeback_inodes(&wbc); |
| 788 | if (wbc.nr_to_write > 0) { | 788 | if (wbc.nr_to_write > 0) { |
| 789 | if (wbc.encountered_congestion || wbc.more_io) | 789 | if (wbc.encountered_congestion || wbc.more_io) |
| 790 | congestion_wait(WRITE, HZ/10); | 790 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 791 | else | 791 | else |
| 792 | break; /* All the old data is written */ | 792 | break; /* All the old data is written */ |
| 793 | } | 793 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e0f2cdf9d8b1..a35eeab2724c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -1666,7 +1666,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, | |||
| 1666 | preferred_zone, migratetype); | 1666 | preferred_zone, migratetype); |
| 1667 | 1667 | ||
| 1668 | if (!page && gfp_mask & __GFP_NOFAIL) | 1668 | if (!page && gfp_mask & __GFP_NOFAIL) |
| 1669 | congestion_wait(WRITE, HZ/50); | 1669 | congestion_wait(BLK_RW_ASYNC, HZ/50); |
| 1670 | } while (!page && (gfp_mask & __GFP_NOFAIL)); | 1670 | } while (!page && (gfp_mask & __GFP_NOFAIL)); |
| 1671 | 1671 | ||
| 1672 | return page; | 1672 | return page; |
| @@ -1831,7 +1831,7 @@ rebalance: | |||
| 1831 | pages_reclaimed += did_some_progress; | 1831 | pages_reclaimed += did_some_progress; |
| 1832 | if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { | 1832 | if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { |
| 1833 | /* Wait for some write requests to complete then retry */ | 1833 | /* Wait for some write requests to complete then retry */ |
| 1834 | congestion_wait(WRITE, HZ/50); | 1834 | congestion_wait(BLK_RW_ASYNC, HZ/50); |
| 1835 | goto rebalance; | 1835 | goto rebalance; |
| 1836 | } | 1836 | } |
| 1837 | 1837 | ||
| @@ -1983,7 +1983,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) | |||
| 1983 | unsigned long alloc_end = addr + (PAGE_SIZE << order); | 1983 | unsigned long alloc_end = addr + (PAGE_SIZE << order); |
| 1984 | unsigned long used = addr + PAGE_ALIGN(size); | 1984 | unsigned long used = addr + PAGE_ALIGN(size); |
| 1985 | 1985 | ||
| 1986 | split_page(virt_to_page(addr), order); | 1986 | split_page(virt_to_page((void *)addr), order); |
| 1987 | while (used < alloc_end) { | 1987 | while (used < alloc_end) { |
| 1988 | free_page(used); | 1988 | free_page(used); |
| 1989 | used += PAGE_SIZE; | 1989 | used += PAGE_SIZE; |
| @@ -1544,9 +1544,6 @@ void __init kmem_cache_init(void) | |||
| 1544 | } | 1544 | } |
| 1545 | 1545 | ||
| 1546 | g_cpucache_up = EARLY; | 1546 | g_cpucache_up = EARLY; |
| 1547 | |||
| 1548 | /* Annotate slab for lockdep -- annotate the malloc caches */ | ||
| 1549 | init_lock_keys(); | ||
| 1550 | } | 1547 | } |
| 1551 | 1548 | ||
| 1552 | void __init kmem_cache_init_late(void) | 1549 | void __init kmem_cache_init_late(void) |
| @@ -1563,6 +1560,9 @@ void __init kmem_cache_init_late(void) | |||
| 1563 | /* Done! */ | 1560 | /* Done! */ |
| 1564 | g_cpucache_up = FULL; | 1561 | g_cpucache_up = FULL; |
| 1565 | 1562 | ||
| 1563 | /* Annotate slab for lockdep -- annotate the malloc caches */ | ||
| 1564 | init_lock_keys(); | ||
| 1565 | |||
| 1566 | /* | 1566 | /* |
| 1567 | * Register a cpu startup notifier callback that initializes | 1567 | * Register a cpu startup notifier callback that initializes |
| 1568 | * cpu_cache_get for all new cpus | 1568 | * cpu_cache_get for all new cpus |
| @@ -2547,7 +2547,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep) | |||
| 2547 | } | 2547 | } |
| 2548 | 2548 | ||
| 2549 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) | 2549 | if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) |
| 2550 | synchronize_rcu(); | 2550 | rcu_barrier(); |
| 2551 | 2551 | ||
| 2552 | __kmem_cache_destroy(cachep); | 2552 | __kmem_cache_destroy(cachep); |
| 2553 | mutex_unlock(&cache_chain_mutex); | 2553 | mutex_unlock(&cache_chain_mutex); |
| @@ -595,6 +595,8 @@ EXPORT_SYMBOL(kmem_cache_create); | |||
| 595 | void kmem_cache_destroy(struct kmem_cache *c) | 595 | void kmem_cache_destroy(struct kmem_cache *c) |
| 596 | { | 596 | { |
| 597 | kmemleak_free(c); | 597 | kmemleak_free(c); |
| 598 | if (c->flags & SLAB_DESTROY_BY_RCU) | ||
| 599 | rcu_barrier(); | ||
| 598 | slob_free(c, sizeof(struct kmem_cache)); | 600 | slob_free(c, sizeof(struct kmem_cache)); |
| 599 | } | 601 | } |
| 600 | EXPORT_SYMBOL(kmem_cache_destroy); | 602 | EXPORT_SYMBOL(kmem_cache_destroy); |
| @@ -2595,6 +2595,8 @@ static inline int kmem_cache_close(struct kmem_cache *s) | |||
| 2595 | */ | 2595 | */ |
| 2596 | void kmem_cache_destroy(struct kmem_cache *s) | 2596 | void kmem_cache_destroy(struct kmem_cache *s) |
| 2597 | { | 2597 | { |
| 2598 | if (s->flags & SLAB_DESTROY_BY_RCU) | ||
| 2599 | rcu_barrier(); | ||
| 2598 | down_write(&slub_lock); | 2600 | down_write(&slub_lock); |
| 2599 | s->refcount--; | 2601 | s->refcount--; |
| 2600 | if (!s->refcount) { | 2602 | if (!s->refcount) { |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 54155268dfca..dea7abd31098 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -1104,7 +1104,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, | |||
| 1104 | */ | 1104 | */ |
| 1105 | if (nr_freed < nr_taken && !current_is_kswapd() && | 1105 | if (nr_freed < nr_taken && !current_is_kswapd() && |
| 1106 | lumpy_reclaim) { | 1106 | lumpy_reclaim) { |
| 1107 | congestion_wait(WRITE, HZ/10); | 1107 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 1108 | 1108 | ||
| 1109 | /* | 1109 | /* |
| 1110 | * The attempt at page out may have made some | 1110 | * The attempt at page out may have made some |
| @@ -1721,7 +1721,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
| 1721 | 1721 | ||
| 1722 | /* Take a nap, wait for some writeback to complete */ | 1722 | /* Take a nap, wait for some writeback to complete */ |
| 1723 | if (sc->nr_scanned && priority < DEF_PRIORITY - 2) | 1723 | if (sc->nr_scanned && priority < DEF_PRIORITY - 2) |
| 1724 | congestion_wait(WRITE, HZ/10); | 1724 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 1725 | } | 1725 | } |
| 1726 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 1726 | /* top priority shrink_zones still had more to do? don't OOM, then */ |
| 1727 | if (!sc->all_unreclaimable && scanning_global_lru(sc)) | 1727 | if (!sc->all_unreclaimable && scanning_global_lru(sc)) |
| @@ -1960,7 +1960,7 @@ loop_again: | |||
| 1960 | * another pass across the zones. | 1960 | * another pass across the zones. |
| 1961 | */ | 1961 | */ |
| 1962 | if (total_scanned && priority < DEF_PRIORITY - 2) | 1962 | if (total_scanned && priority < DEF_PRIORITY - 2) |
| 1963 | congestion_wait(WRITE, HZ/10); | 1963 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 1964 | 1964 | ||
| 1965 | /* | 1965 | /* |
| 1966 | * We do this so kswapd doesn't build up large priorities for | 1966 | * We do this so kswapd doesn't build up large priorities for |
| @@ -2233,7 +2233,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
| 2233 | goto out; | 2233 | goto out; |
| 2234 | 2234 | ||
| 2235 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) | 2235 | if (sc.nr_scanned && prio < DEF_PRIORITY - 2) |
| 2236 | congestion_wait(WRITE, HZ / 10); | 2236 | congestion_wait(BLK_RW_ASYNC, HZ / 10); |
| 2237 | } | 2237 | } |
| 2238 | } | 2238 | } |
| 2239 | 2239 | ||
