author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-01-21 18:49:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:45 -0500
commit		0f843c6ac318bb3ea7b63437b66dd39d8f01b088 (patch)
tree		9640f1c83217b9fe5d0d364ca697ed15bb605343
parent		b854f711f6b8b49674d494c5e6d706096dd38301 (diff)
mm/rmap: factor nonlinear handling out of try_to_unmap_file()
To merge all kinds of rmap traverse functions (try_to_unmap(),
try_to_munlock(), page_referenced() and page_mkclean()), we need to
extract the common parts and separate out the non-common parts.

Nonlinear handling is done only in try_to_unmap_file(); the other rmap
traverse functions don't care about it. Therefore it is better to factor
nonlinear handling out of try_to_unmap_file(), so that all kinds of rmap
traverse functions can be merged easily.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
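
For context on where this factoring leads: the follow-up patches in this
series merge all four traverse functions into a single rmap_walk() driven by
a per-caller control structure, with the helper introduced here becoming the
file_nonlinear callback of the try_to_unmap() walk. A sketch of that
structure (field set abridged here; the authoritative version lands in
include/linux/rmap.h later in the series):

/*
 * Sketch of the unified walker's control structure, as introduced by
 * the later patches of this series (abridged; not part of this commit).
 */
struct page;
struct vm_area_struct;
struct address_space;

struct rmap_walk_control {
	void *arg;	/* private data passed through to the callbacks */
	/* called for each vma that might map the page */
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	/* early-exit check, e.g. "is the page no longer mapped?" */
	int (*done)(struct page *page);
	/* the nonlinear odd case this patch isolates */
	int (*file_nonlinear)(struct page *page, struct address_space *mapping,
			struct vm_area_struct *vma);
};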
-rw-r--r--	mm/rmap.c	136
1 file changed, 74 insertions, 62 deletions
@@ -1426,6 +1426,79 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
+static int try_to_unmap_nonlinear(struct page *page,
+		struct address_space *mapping, struct vm_area_struct *vma)
+{
+	int ret = SWAP_AGAIN;
+	unsigned long cursor;
+	unsigned long max_nl_cursor = 0;
+	unsigned long max_nl_size = 0;
+	unsigned int mapcount;
+
+	list_for_each_entry(vma,
+			&mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+		cursor = (unsigned long) vma->vm_private_data;
+		if (cursor > max_nl_cursor)
+			max_nl_cursor = cursor;
+		cursor = vma->vm_end - vma->vm_start;
+		if (cursor > max_nl_size)
+			max_nl_size = cursor;
+	}
+
+	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
+		return SWAP_FAIL;
+	}
+
+	/*
+	 * We don't try to search for this page in the nonlinear vmas,
+	 * and page_referenced wouldn't have found it anyway. Instead
+	 * just walk the nonlinear vmas trying to age and unmap some.
+	 * The mapcount of the page we came in with is irrelevant,
+	 * but even so use it as a guide to how hard we should try?
+	 */
+	mapcount = page_mapcount(page);
+	if (!mapcount)
+		return ret;
+
+	cond_resched();
+
+	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
+	if (max_nl_cursor == 0)
+		max_nl_cursor = CLUSTER_SIZE;
+
+	do {
+		list_for_each_entry(vma,
+				&mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+			cursor = (unsigned long) vma->vm_private_data;
+			while (cursor < max_nl_cursor &&
+				cursor < vma->vm_end - vma->vm_start) {
+				if (try_to_unmap_cluster(cursor, &mapcount,
+						vma, page) == SWAP_MLOCK)
+					ret = SWAP_MLOCK;
+				cursor += CLUSTER_SIZE;
+				vma->vm_private_data = (void *) cursor;
+				if ((int)mapcount <= 0)
+					return ret;
+			}
+			vma->vm_private_data = (void *) max_nl_cursor;
+		}
+		cond_resched();
+		max_nl_cursor += CLUSTER_SIZE;
+	} while (max_nl_cursor <= max_nl_size);
+
+	/*
+	 * Don't loop forever (perhaps all the remaining pages are
+	 * in locked vmas). Reset cursor on all unreserved nonlinear
+	 * vmas, now forgetting on which ones it had fallen behind.
+	 */
+	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
+		vma->vm_private_data = NULL;
+
+	return ret;
+}
+
 bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -1515,10 +1588,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	pgoff_t pgoff = page->index << compound_order(page);
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
-	unsigned long cursor;
-	unsigned long max_nl_cursor = 0;
-	unsigned long max_nl_size = 0;
-	unsigned int mapcount;
 
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1539,64 +1608,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	if (TTU_ACTION(flags) == TTU_MUNLOCK)
 		goto out;
 
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-							shared.nonlinear) {
-		cursor = (unsigned long) vma->vm_private_data;
-		if (cursor > max_nl_cursor)
-			max_nl_cursor = cursor;
-		cursor = vma->vm_end - vma->vm_start;
-		if (cursor > max_nl_size)
-			max_nl_size = cursor;
-	}
-
-	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
-		ret = SWAP_FAIL;
-		goto out;
-	}
-
-	/*
-	 * We don't try to search for this page in the nonlinear vmas,
-	 * and page_referenced wouldn't have found it anyway. Instead
-	 * just walk the nonlinear vmas trying to age and unmap some.
-	 * The mapcount of the page we came in with is irrelevant,
-	 * but even so use it as a guide to how hard we should try?
-	 */
-	mapcount = page_mapcount(page);
-	if (!mapcount)
-		goto out;
-	cond_resched();
-
-	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
-	if (max_nl_cursor == 0)
-		max_nl_cursor = CLUSTER_SIZE;
-
-	do {
-		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-						shared.nonlinear) {
-			cursor = (unsigned long) vma->vm_private_data;
-			while ( cursor < max_nl_cursor &&
-				cursor < vma->vm_end - vma->vm_start) {
-				if (try_to_unmap_cluster(cursor, &mapcount,
-						vma, page) == SWAP_MLOCK)
-					ret = SWAP_MLOCK;
-				cursor += CLUSTER_SIZE;
-				vma->vm_private_data = (void *) cursor;
-				if ((int)mapcount <= 0)
-					goto out;
-			}
-			vma->vm_private_data = (void *) max_nl_cursor;
-		}
-		cond_resched();
-		max_nl_cursor += CLUSTER_SIZE;
-	} while (max_nl_cursor <= max_nl_size);
-
-	/*
-	 * Don't loop forever (perhaps all the remaining pages are
-	 * in locked vmas). Reset cursor on all unreserved nonlinear
-	 * vmas, now forgetting on which ones it had fallen behind.
-	 */
-	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
-		vma->vm_private_data = NULL;
+	ret = try_to_unmap_nonlinear(page, mapping, vma);
 out:
 	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
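
A side note on the cursor arithmetic carried over into the new helper:
(max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK rounds the largest nonlinear
vma's size up to a whole number of scan clusters, so the do/while sweep also
covers a final partial cluster. A standalone userspace sketch of that
rounding (the constants below are illustrative stand-ins; in mm/rmap.c,
CLUSTER_SIZE is min(32*PAGE_SIZE, PMD_SIZE) and CLUSTER_MASK is
~(CLUSTER_SIZE - 1)):

#include <stdio.h>

/* Illustrative stand-ins: assumes 4K pages, so one cluster = 32 pages. */
#define CLUSTER_SIZE	(32UL * 4096)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 4096, CLUSTER_SIZE, CLUSTER_SIZE + 1 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* Same expression as in try_to_unmap_nonlinear(): round
		 * up to the next multiple of CLUSTER_SIZE. */
		unsigned long rounded =
			(sizes[i] + CLUSTER_SIZE - 1) & CLUSTER_MASK;

		/* Prints e.g. "1 -> 131072" and "131073 -> 262144". */
		printf("%lu -> %lu\n", sizes[i], rounded);
	}
	return 0;
}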