author	Johannes Weiner <jweiner@redhat.com>	2011-10-31 20:09:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 20:30:49 -0400
commit	264e56d8247ef6e31ed4386926cae86c61ddcb18 (patch)
tree	87e85ee670fb7ae4c0cd7bdeae700faff021bf48 /mm/vmscan.c
parent	3f380998aeb51b99d5d22cadb41162e1e9db70d2 (diff)
mm: disable user interface to manually rescue unevictable pages
At one point, anonymous pages were supposed to go on the unevictable list when no swap space was configured, and the idea was to manually rescue those pages after adding swap and making them evictable again. But nowadays, swap-backed pages on the anon LRU list are not scanned without available swap space anyway, so there is no point in moving them to a separate list anymore.

The manual rescue could also be used in case pages were stranded on the unevictable list due to race conditions. But the code has been around for a while now, and newly discovered bugs should be properly reported and dealt with instead of relying on such a manual fixup.

In addition to the lack of a use case, the sysfs interface to rescue pages from a specific NUMA node has been broken since its introduction, so it's unlikely that anybody ever relied on that.

This patch removes the functionality behind the sysctl and the node-interface and emits a one-time warning when somebody tries to access either of them.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reported-by: Kautuk Consul <consul.kautuk@gmail.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
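The one-time warning relies on printk_once(), which wraps printk() in a static guard flag: the message fires on the first call and is suppressed on every later one. A minimal userspace analogue of that one-shot pattern, for illustration only (fprintf stands in for printk; the function name mirrors the patch, but this program is not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Userspace sketch of the kernel's printk_once() behavior: the static
 * flag ensures the warning is emitted only on the first call, no matter
 * how often the disabled interface is poked. */
static void warn_scan_unevictable_pages(void)
{
	static bool warned;

	if (!warned) {
		warned = true;
		fprintf(stderr,
			"The scan_unevictable_pages sysctl/node-interface has been "
			"disabled for lack of a legitimate use case. If you have "
			"one, please send an email to linux-mm@kvack.org.\n");
	}
}

int main(void)
{
	/* Simulate three accesses to the defunct interface: one warning. */
	warn_scan_unevictable_pages();
	warn_scan_unevictable_pages();
	warn_scan_unevictable_pages();
	return 0;
}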
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	84
1 file changed, 8 insertions(+), 76 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ac644fe85589..3886b0bd7869 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3417,66 +3417,12 @@ void scan_mapping_unevictable_pages(struct address_space *mapping)
 
 }
 
-/**
- * scan_zone_unevictable_pages - check unevictable list for evictable pages
- * @zone - zone of which to scan the unevictable list
- *
- * Scan @zone's unevictable LRU lists to check for pages that have become
- * evictable. Move those that have to @zone's inactive list where they
- * become candidates for reclaim, unless shrink_inactive_zone() decides
- * to reactivate them. Pages that are still unevictable are rotated
- * back onto @zone's unevictable list.
- */
-#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
-static void scan_zone_unevictable_pages(struct zone *zone)
+static void warn_scan_unevictable_pages(void)
 {
-	struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
-	unsigned long scan;
-	unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
-
-	while (nr_to_scan > 0) {
-		unsigned long batch_size = min(nr_to_scan,
-					       SCAN_UNEVICTABLE_BATCH_SIZE);
-
-		spin_lock_irq(&zone->lru_lock);
-		for (scan = 0; scan < batch_size; scan++) {
-			struct page *page = lru_to_page(l_unevictable);
-
-			if (!trylock_page(page))
-				continue;
-
-			prefetchw_prev_lru_page(page, l_unevictable, flags);
-
-			if (likely(PageLRU(page) && PageUnevictable(page)))
-				check_move_unevictable_page(page, zone);
-
-			unlock_page(page);
-		}
-		spin_unlock_irq(&zone->lru_lock);
-
-		nr_to_scan -= batch_size;
-	}
-}
-
-
-/**
- * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
- *
- * A really big hammer: scan all zones' unevictable LRU lists to check for
- * pages that have become evictable. Move those back to the zones'
- * inactive list where they become candidates for reclaim.
- * This occurs when, e.g., we have unswappable pages on the unevictable lists,
- * and we add swap to the system. As such, it runs in the context of a task
- * that has possibly/probably made some previously unevictable pages
- * evictable.
- */
-static void scan_all_zones_unevictable_pages(void)
-{
-	struct zone *zone;
-
-	for_each_zone(zone) {
-		scan_zone_unevictable_pages(zone);
-	}
+	printk_once(KERN_WARNING
+		    "The scan_unevictable_pages sysctl/node-interface has been "
+		    "disabled for lack of a legitimate use case. If you have "
+		    "one, please send an email to linux-mm@kvack.org.\n");
 }
 
 /*
@@ -3489,11 +3435,8 @@ int scan_unevictable_handler(struct ctl_table *table, int write,
 			   void __user *buffer,
 			   size_t *length, loff_t *ppos)
 {
+	warn_scan_unevictable_pages();
 	proc_doulongvec_minmax(table, write, buffer, length, ppos);
-
-	if (write && *(unsigned long *)table->data)
-		scan_all_zones_unevictable_pages();
-
 	scan_unevictable_pages = 0;
 	return 0;
 }
@@ -3508,6 +3451,7 @@ static ssize_t read_scan_unevictable_node(struct sys_device *dev,
 					  struct sysdev_attribute *attr,
 					  char *buf)
 {
+	warn_scan_unevictable_pages();
 	return sprintf(buf, "0\n");	/* always zero; should fit... */
 }
 
@@ -3515,19 +3459,7 @@ static ssize_t write_scan_unevictable_node(struct sys_device *dev,
 					   struct sysdev_attribute *attr,
 					   const char *buf, size_t count)
 {
-	struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
-	struct zone *zone;
-	unsigned long res;
-	unsigned long req = strict_strtoul(buf, 10, &res);
-
-	if (req || !res)
-		return 1; /* Invalid input or zero is no-op */
-
-	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
-		if (!populated_zone(zone))
-			continue;
-		scan_zone_unevictable_pages(zone);
-	}
+	warn_scan_unevictable_pages();
 	return 1;
 }
 
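After this patch, a write to the sysctl still passes through proc_doulongvec_minmax(), but the handler only triggers the one-time warning and forces scan_unevictable_pages back to zero, so the knob always reads back as 0. A sketch of how one might observe that from userspace, assuming root privileges and the standard /proc/sys path for the vm.scan_unevictable_pages sysctl (error handling kept minimal):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/proc/sys/vm/scan_unevictable_pages", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The patched handler accepts the write, prints its one-time
	 * warning to the kernel log, and resets the knob to 0. */
	if (write(fd, "1\n", 2) < 0)
		perror("write");
	if (lseek(fd, 0, SEEK_SET) == 0 &&
	    (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("scan_unevictable_pages reads back as: %s", buf); /* expect 0 */
	}
	close(fd);
	return 0;
}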