author     Lee Schermerhorn <Lee.Schermerhorn@hp.com>        2008-10-18 23:26:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-10-20 11:50:26 -0400
commit     89e004ea55abe201b29e2d6e35124101f1288ef7 (patch)
tree       272a8f453106fd33d66fd7153f44696648dbe8b6 /mm/vmscan.c
parent     ba9ddf49391645e6bb93219131a40446538a5e76 (diff)
SHM_LOCKED pages are unevictable
Shmem segments locked into memory via shmctl(SHM_LOCK) should not be
kept on the normal LRU, since scanning them is a waste of time and might
throw off kswapd's balancing algorithms. Place them on the unevictable
LRU list instead.
Use the AS_UNEVICTABLE flag to mark address_space of SHM_LOCKed shared
memory regions as unevictable. Then these pages will be culled off the
normal LRU lists during vmscan.
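For context, the AS_UNEVICTABLE machinery lives in include/linux/pagemap.h and
was introduced by the parent patch of this series (ba9ddf4939, "Ramfs and Ram
Disk pages are unevictable"). A rough sketch of the wrappers, reconstructed
rather than quoted from this commit; the exact bit position of AS_UNEVICTABLE
is an assumption:

/* Sketch of include/linux/pagemap.h wrappers from this series;
 * the bit position below is an assumption, not quoted from the patch. */
#ifdef CONFIG_UNEVICTABLE_LRU
#define AS_UNEVICTABLE	(__GFP_BITS_SHIFT + 3)	/* e.g., ramdisk, SHM_LOCK */

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return 0;
}
#else
static inline void mapping_set_unevictable(struct address_space *mapping) { }
static inline int mapping_unevictable(struct address_space *mapping)
{
	return 0;
}
#endif

page_evictable() consults mapping_unevictable() so that vmscan can divert
these pages to the unevictable list as it encounters them.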
Add a new wrapper function to clear the mapping's unevictable state when/if
the shared memory segment is unlocked (SHM_UNLOCK).
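The clearing side is wired up in shmem_lock() in mm/shmem.c elsewhere in this
series. A minimal sketch of that function as patched, reconstructed from the
series rather than taken from this mm/vmscan.c diff:

/* Sketch of the clear wrapper (include/linux/pagemap.h): */
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

/* Sketch of shmem_lock() in mm/shmem.c as patched by this series: */
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		/* future page faults and vmscan will treat pages as unevictable */
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		/* rescue the segment's pages from the unevictable list */
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}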
Add scan_mapping_unevictable_pages() to mm/vmscan.c to scan all pages in
the shmem segment's mapping [struct address_space] for evictability now
that they're no longer locked. Pages found to be evictable are moved to
the appropriate zone lru list.
Changes depend on [CONFIG_]UNEVICTABLE_LRU.
[kosaki.motohiro@jp.fujitsu.com: revert shm change]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 89 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 89 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9babfbc1ddc8..dfb342e0db9b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2346,4 +2346,93 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 
 	return 1;
 }
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+	VM_BUG_ON(PageActive(page));
+
+retry:
+	ClearPageUnevictable(page);
+	if (page_evictable(page, NULL)) {
+		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+		__dec_zone_state(zone, NR_UNEVICTABLE);
+		list_move(&page->lru, &zone->lru[l].list);
+		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
+		__count_vm_event(UNEVICTABLE_PGRESCUED);
+	} else {
+		/*
+		 * rotate unevictable list
+		 */
+		SetPageUnevictable(page);
+		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+		if (page_evictable(page, NULL))
+			goto retry;
+	}
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+	pgoff_t next = 0;
+	pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+			 PAGE_CACHE_SHIFT;
+	struct zone *zone;
+	struct pagevec pvec;
+
+	if (mapping->nrpages == 0)
+		return;
+
+	pagevec_init(&pvec, 0);
+	while (next < end &&
+		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+		int i;
+		int pg_scanned = 0;
+
+		zone = NULL;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			pgoff_t page_index = page->index;
+			struct zone *pagezone = page_zone(page);
+
+			pg_scanned++;
+			if (page_index > next)
+				next = page_index;
+			next++;
+
+			if (pagezone != zone) {
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock_irq(&zone->lru_lock);
+			}
+
+			if (PageLRU(page) && PageUnevictable(page))
+				check_move_unevictable_page(page, zone);
+		}
+		if (zone)
+			spin_unlock_irq(&zone->lru_lock);
+		pagevec_release(&pvec);
+
+		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+	}
+
+}
 #endif
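For reference, the user-visible flow this patch serves, as a minimal
userspace sketch (SHM_LOCK needs CAP_IPC_LOCK or, since 2.6.10, a
sufficient RLIMIT_MEMLOCK; error handling kept to perror() for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* Create and attach a one-page private shared memory segment. */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return 1;
	}
	char *p = shmat(id, NULL, 0);
	if (p == (void *)-1) {
		perror("shmat");
		return 1;
	}
	memset(p, 0xaa, 4096);	/* fault the pages in */

	/* SHM_LOCK marks the segment's address_space AS_UNEVICTABLE;
	 * vmscan then culls its pages onto the unevictable LRU. */
	if (shmctl(id, SHM_LOCK, NULL) != 0)
		perror("shmctl(SHM_LOCK)");

	/* SHM_UNLOCK clears the flag; with this patch,
	 * scan_mapping_unevictable_pages() moves the pages back to the
	 * normal LRU lists (counted as UNEVICTABLE_PGRESCUED in
	 * /proc/vmstat). */
	if (shmctl(id, SHM_UNLOCK, NULL) != 0)
		perror("shmctl(SHM_UNLOCK)");

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}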