diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-18 18:08:12 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-18 18:08:12 -0500 |
commit | 673ab8783b596cda5b616b317b1a1b47480c66fd (patch) | |
tree | d3fc9bb4279720c53d0dc69c2a34c40635cf05f3 /mm/vmscan.c | |
parent | d7b96ca5d08a8f2f836feb2b3b3bd721d2837a8e (diff) | |
parent | 3cf23841b4b76eb94d3f8d0fb3627690e4431413 (diff) |
Merge branch 'akpm' (more patches from Andrew)
Merge patches from Andrew Morton:
"Most of the rest of MM, plus a few dribs and drabs.
I still have quite a few irritating patches left around: ones with
dubious testing results, lack of review, ones which should have gone
via maintainer trees but the maintainers are slack, etc.
I need to be more activist in getting these things wrapped up outside
the merge window, but they're such a PITA."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (48 commits)
mm/vmscan.c: avoid possible deadlock caused by too_many_isolated()
vmscan: comment too_many_isolated()
mm/kmemleak.c: remove obsolete simple_strtoul
mm/memory_hotplug.c: improve comments
mm/hugetlb: create hugetlb cgroup file in hugetlb_init
mm/mprotect.c: coding-style cleanups
Documentation: ABI: /sys/devices/system/node/
slub: drop mutex before deleting sysfs entry
memcg: add comments clarifying aspects of cache attribute propagation
kmem: add slab-specific documentation about the kmem controller
slub: slub-specific propagation changes
slab: propagate tunable values
memcg: aggregate memcg cache values in slabinfo
memcg/sl[au]b: shrink dead caches
memcg/sl[au]b: track all the memcg children of a kmem_cache
memcg: destroy memcg caches
sl[au]b: allocate objects from memcg cache
sl[au]b: always get the cache from its page in kmem_cache_free()
memcg: skip memcg kmem allocations in specified code regions
memcg: infrastructure to match an allocation to the right cache
...
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 14 |
1 file changed, 13 insertions, 1 deletion
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7f3096137b8a..828530e2794a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1177,7 +1177,11 @@ int isolate_lru_page(struct page *page) | |||
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | /* | 1179 | /* |
1180 | * Are there way too many processes in the direct reclaim path already? | 1180 | * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and |
1181 | * then get rescheduled. When there is a massive number of tasks doing page | ||
1182 | * allocation, such sleeping direct reclaimers may keep piling up on each CPU, | ||
1183 | * the LRU list will go small and be scanned faster than necessary, leading to | ||
1184 | * unnecessary swapping, thrashing and OOM. | ||
1181 | */ | 1185 | */ |
1182 | static int too_many_isolated(struct zone *zone, int file, | 1186 | static int too_many_isolated(struct zone *zone, int file, |
1183 | struct scan_control *sc) | 1187 | struct scan_control *sc) |
@@ -1198,6 +1202,14 @@ static int too_many_isolated(struct zone *zone, int file, | |||
1198 | isolated = zone_page_state(zone, NR_ISOLATED_ANON); | 1202 | isolated = zone_page_state(zone, NR_ISOLATED_ANON); |
1199 | } | 1203 | } |
1200 | 1204 | ||
1205 | /* | ||
1206 | * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they | ||
1207 | * won't get blocked by normal direct-reclaimers, forming a circular | ||
1208 | * deadlock. | ||
1209 | */ | ||
1210 | if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS) | ||
1211 | inactive >>= 3; | ||
1212 | |||
1201 | return isolated > inactive; | 1213 | return isolated > inactive; |
1202 | } | 1214 | } |
1203 | 1215 | ||