-rw-r--r--  include/linux/pagemap.h |  10
-rw-r--r--  mm/filemap.c            | 145
-rw-r--r--  mm/huge_memory.c        |  22
-rw-r--r--  mm/khugepaged.c         |   4
-rw-r--r--  mm/memfd.c              |   2
-rw-r--r--  mm/migrate.c            |   2
-rw-r--r--  mm/shmem.c              |   2
-rw-r--r--  mm/swap_state.c         |   4
8 files changed, 95 insertions(+), 96 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c7552459a15f..37a4d9e32cd3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -333,6 +333,16 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
 			mapping_gfp_mask(mapping));
 }
 
+static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+{
+	if (PageHuge(page))
+		return page;
+
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
+	return page + (offset & (compound_nr(page) - 1));
+}
+
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
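
For illustration only, not part of the patch: a minimal sketch of how a raw XArray lookup would use the new find_subpage() helper once every slot covered by a compound page stores the head page. The lookup_subpage() wrapper is hypothetical, and it omits the RCU and speculative-reference handling that the real lookup paths in mm/filemap.c perform.

/* Hypothetical sketch, assuming the whole series is applied. */
static struct page *lookup_subpage(struct address_space *mapping,
				   pgoff_t offset)
{
	/* The slot holds the THP head page (or a value entry). */
	struct page *head = xa_load(&mapping->i_pages, offset);

	if (!head || xa_is_value(head))
		return NULL;

	/* e.g. head cached at index 512, offset 515 -> head + 3 */
	return find_subpage(head, offset);
}
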
diff --git a/mm/filemap.c b/mm/filemap.c
index d5462d706f76..533f271d6839 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -281,11 +281,11 @@ EXPORT_SYMBOL(delete_from_page_cache);
  * @pvec: pagevec with pages to delete
  *
  * The function walks over mapping->i_pages and removes pages passed in @pvec
- * from the mapping. The function expects @pvec to be sorted by page index.
+ * from the mapping. The function expects @pvec to be sorted by page index
+ * and is optimised for it to be dense.
  * It tolerates holes in @pvec (mapping entries at those indices are not
  * modified). The function expects only THP head pages to be present in the
- * @pvec and takes care to delete all corresponding tail pages from the
- * mapping as well.
+ * @pvec.
  *
  * The function expects the i_pages lock to be held.
  */
@@ -294,40 +294,43 @@ static void page_cache_delete_batch(struct address_space *mapping,
 {
 	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
 	int total_pages = 0;
-	int i = 0, tail_pages = 0;
+	int i = 0;
 	struct page *page;
 
 	mapping_set_update(&xas, mapping);
 	xas_for_each(&xas, page, ULONG_MAX) {
-		if (i >= pagevec_count(pvec) && !tail_pages)
+		if (i >= pagevec_count(pvec))
 			break;
+
+		/* A swap/dax/shadow entry got inserted? Skip it. */
 		if (xa_is_value(page))
 			continue;
-		if (!tail_pages) {
-			/*
-			 * Some page got inserted in our range? Skip it. We
-			 * have our pages locked so they are protected from
-			 * being removed.
-			 */
-			if (page != pvec->pages[i]) {
-				VM_BUG_ON_PAGE(page->index >
-						pvec->pages[i]->index, page);
-				continue;
-			}
-			WARN_ON_ONCE(!PageLocked(page));
-			if (PageTransHuge(page) && !PageHuge(page))
-				tail_pages = HPAGE_PMD_NR - 1;
+		/*
+		 * A page got inserted in our range? Skip it. We have our
+		 * pages locked so they are protected from being removed.
+		 * If we see a page whose index is higher than ours, it
+		 * means our page has been removed, which shouldn't be
+		 * possible because we're holding the PageLock.
+		 */
+		if (page != pvec->pages[i]) {
+			VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
+					page);
+			continue;
+		}
+
+		WARN_ON_ONCE(!PageLocked(page));
+
+		if (page->index == xas.xa_index)
 			page->mapping = NULL;
-			/*
-			 * Leave page->index set: truncation lookup relies
-			 * upon it
-			 */
+		/* Leave page->index set: truncation lookup relies on it */
+
+		/*
+		 * Move to the next page in the vector if this is a regular
+		 * page or the index is of the last sub-page of this compound
+		 * page.
+		 */
+		if (page->index + compound_nr(page) - 1 == xas.xa_index)
 			i++;
-		} else {
-			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
-					!= pvec->pages[i]->index, page);
-			tail_pages--;
-		}
 		xas_store(&xas, NULL);
 		total_pages++;
 	}
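
A worked example of the new advance condition, with illustrative numbers that are an assumption rather than part of the patch (HPAGE_PMD_NR taken as 512):

/*
 * Illustrative only: a THP cached at index 512 covers slots 512..1023,
 * and with this series each of those slots stores the head page, so
 * page->index stays 512 for every slot the walk visits.  The test
 *
 *	page->index + compound_nr(page) - 1 == xas.xa_index
 *	        512 +              512 - 1 == 1023
 *
 * is true only on the last slot, so i advances to the next pvec entry
 * exactly once per compound page.  For an order-0 page compound_nr()
 * is 1 and the test is true immediately.
 */
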
@@ -1520,7 +1523,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
-	struct page *head, *page;
+	struct page *page;
 
 	rcu_read_lock();
 repeat:
@@ -1535,25 +1538,19 @@ repeat:
 	if (!page || xa_is_value(page))
 		goto out;
 
-	head = compound_head(page);
-	if (!page_cache_get_speculative(head))
-		goto repeat;
-
-	/* The page was split under us? */
-	if (compound_head(page) != head) {
-		put_page(head);
+	if (!page_cache_get_speculative(page))
 		goto repeat;
-	}
 
 	/*
-	 * Has the page moved?
+	 * Has the page moved or been split?
 	 * This is part of the lockless pagecache protocol. See
 	 * include/linux/pagemap.h for details.
 	 */
 	if (unlikely(page != xas_reload(&xas))) {
-		put_page(head);
+		put_page(page);
 		goto repeat;
 	}
+	page = find_subpage(page, offset);
 out:
 	rcu_read_unlock();
 
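
One way to read the simplification (my reading, not spelled out in the patch): since the XArray now contains only head pages, the speculative reference is taken directly on the loaded entry, and the single xas_reload() comparison covers both a page that moved and one that was split, because a split rewrites the slots that used to point at the head. The contract seen by callers is unchanged; a hypothetical caller, with placeholder mapping/index names, still receives the subpage for its index:

/* Hypothetical caller, for illustration only. */
struct page *page = find_get_page(mapping, index);

if (page) {
	/* page is the subpage for index, with a reference held */
	put_page(page);
}
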
@@ -1735,7 +1732,6 @@ unsigned find_get_entries(struct address_space *mapping,
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, ULONG_MAX) {
-		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1746,17 +1742,13 @@ unsigned find_get_entries(struct address_space *mapping,
 		if (xa_is_value(page))
 			goto export;
 
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
+		if (!page_cache_get_speculative(page))
 			goto retry;
 
-		/* The page was split under us? */
-		if (compound_head(page) != head)
-			goto put_page;
-
-		/* Has the page moved? */
+		/* Has the page moved or been split? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
+		page = find_subpage(page, xas.xa_index);
 
 export:
 		indices[ret] = xas.xa_index;
@@ -1765,7 +1757,7 @@ export:
 			break;
 		continue;
 put_page:
-		put_page(head);
+		put_page(page);
 retry:
 		xas_reset(&xas);
 	}
@@ -1807,33 +1799,27 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, end) {
-		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/* Skip over shadow, swap and DAX entries */
 		if (xa_is_value(page))
 			continue;
 
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
+		if (!page_cache_get_speculative(page))
 			goto retry;
 
-		/* The page was split under us? */
-		if (compound_head(page) != head)
-			goto put_page;
-
-		/* Has the page moved? */
+		/* Has the page moved or been split? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = page;
+		pages[ret] = find_subpage(page, xas.xa_index);
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
 		}
 		continue;
 put_page:
-		put_page(head);
+		put_page(page);
 retry:
 		xas_reset(&xas);
 	}
@@ -1878,7 +1864,6 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 
 	rcu_read_lock();
 	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
-		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1888,24 +1873,19 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 		if (xa_is_value(page))
 			break;
 
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
+		if (!page_cache_get_speculative(page))
 			goto retry;
 
-		/* The page was split under us? */
-		if (compound_head(page) != head)
-			goto put_page;
-
-		/* Has the page moved? */
+		/* Has the page moved or been split? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = page;
+		pages[ret] = find_subpage(page, xas.xa_index);
 		if (++ret == nr_pages)
 			break;
 		continue;
 put_page:
-		put_page(head);
+		put_page(page);
 retry:
 		xas_reset(&xas);
 	}
@@ -1941,7 +1921,6 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 
 	rcu_read_lock();
 	xas_for_each_marked(&xas, page, end, tag) {
-		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1952,26 +1931,21 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 		if (xa_is_value(page))
 			continue;
 
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
+		if (!page_cache_get_speculative(page))
 			goto retry;
 
-		/* The page was split under us? */
-		if (compound_head(page) != head)
-			goto put_page;
-
-		/* Has the page moved? */
+		/* Has the page moved or been split? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = page;
+		pages[ret] = find_subpage(page, xas.xa_index);
 		if (++ret == nr_pages) {
 			*index = xas.xa_index + 1;
 			goto out;
 		}
 		continue;
 put_page:
-		put_page(head);
+		put_page(page);
 retry:
 		xas_reset(&xas);
 	}
@@ -2652,7 +2626,7 @@ void filemap_map_pages(struct vm_fault *vmf,
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long max_idx;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
-	struct page *head, *page;
+	struct page *page;
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, end_pgoff) {
@@ -2661,24 +2635,19 @@ void filemap_map_pages(struct vm_fault *vmf,
 		if (xa_is_value(page))
 			goto next;
 
-		head = compound_head(page);
-
 		/*
 		 * Check for a locked page first, as a speculative
 		 * reference may adversely influence page migration.
 		 */
-		if (PageLocked(head))
+		if (PageLocked(page))
 			goto next;
-		if (!page_cache_get_speculative(head))
+		if (!page_cache_get_speculative(page))
 			goto next;
 
-		/* The page was split under us? */
-		if (compound_head(page) != head)
-			goto skip;
-
-		/* Has the page moved? */
+		/* Has the page moved or been split? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto skip;
+		page = find_subpage(page, xas.xa_index);
 
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index de1f15969e27..483b07b2d6ae 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2497,6 +2497,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	struct page *head = compound_head(page);
 	pg_data_t *pgdat = page_pgdat(head);
 	struct lruvec *lruvec;
+	struct address_space *swap_cache = NULL;
+	unsigned long offset = 0;
 	int i;
 
 	lruvec = mem_cgroup_page_lruvec(head, pgdat);
@@ -2504,6 +2506,14 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(head);
 
+	if (PageAnon(head) && PageSwapCache(head)) {
+		swp_entry_t entry = { .val = page_private(head) };
+
+		offset = swp_offset(entry);
+		swap_cache = swap_address_space(entry);
+		xa_lock(&swap_cache->i_pages);
+	}
+
 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
 		__split_huge_page_tail(head, i, lruvec, list);
 		/* Some pages can be beyond i_size: drop them from page cache */
@@ -2513,6 +2523,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
 				shmem_uncharge(head->mapping->host, 1);
 			put_page(head + i);
+		} else if (!PageAnon(page)) {
+			__xa_store(&head->mapping->i_pages, head[i].index,
+					head + i, 0);
+		} else if (swap_cache) {
+			__xa_store(&swap_cache->i_pages, offset + i,
+					head + i, 0);
 		}
 	}
 
@@ -2523,10 +2539,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
 		/* Additional pin to swap cache */
-		if (PageSwapCache(head))
+		if (PageSwapCache(head)) {
 			page_ref_add(head, 2);
-		else
+			xa_unlock(&swap_cache->i_pages);
+		} else {
 			page_ref_inc(head);
+		}
 	} else {
 		/* Additional pin to page cache */
 		page_ref_add(head, 2);
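
Illustrative commentary on what the new __xa_store() calls accomplish; the example index is an assumption, not taken from the patch:

/*
 * For a file THP cached at index 512, before the split every slot
 * holds the head page, and afterwards each slot must name its own
 * (soon to be order-0) page:
 *
 *	before:  i_pages[512 + i] == head
 *	after:   i_pages[512 + i] == head + i
 *
 * That is what the __xa_store() loops above do for file pages (keyed
 * by head[i].index) and for swap-cache pages (keyed by offset + i),
 * under the respective i_pages lock.
 */
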
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ccede2425c3f..04a54ff5a8ac 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1378,7 +1378,7 @@ static void collapse_shmem(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto xa_locked;
 			}
-			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+			xas_store(&xas, new_page);
 			nr_none++;
 			continue;
 		}
@@ -1454,7 +1454,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		list_add_tail(&page->lru, &pagelist);
 
 		/* Finally, replace with the new page. */
-		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+		xas_store(&xas, new_page);
 		continue;
 out_unlock:
 		unlock_page(page);
diff --git a/mm/memfd.c b/mm/memfd.c
index 650e65a46b9c..2647c898990c 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -39,6 +39,7 @@ static void memfd_tag_pins(struct xa_state *xas)
 	xas_for_each(xas, page, ULONG_MAX) {
 		if (xa_is_value(page))
 			continue;
+		page = find_subpage(page, xas->xa_index);
 		if (page_count(page) - page_mapcount(page) > 1)
 			xas_set_mark(xas, MEMFD_TAG_PINNED);
 
@@ -88,6 +89,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 			bool clear = true;
 			if (xa_is_value(page))
 				continue;
+			page = find_subpage(page, xas.xa_index);
 			if (page_count(page) - page_mapcount(page) != 1) {
 				/*
 				 * On the last scan, we clean up all those tags
diff --git a/mm/migrate.c b/mm/migrate.c
index aa72b49e0209..374ef2fcb722 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -460,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
 		for (i = 1; i < HPAGE_PMD_NR; i++) {
 			xas_next(&xas);
-			xas_store(&xas, newpage + i);
+			xas_store(&xas, newpage);
 		}
 	}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 15d26c86e5ef..57a6aedf6649 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -631,7 +631,7 @@ static int shmem_add_to_page_cache(struct page *page,
 		if (xas_error(&xas))
 			goto unlock;
 next:
-		xas_store(&xas, page + i);
+		xas_store(&xas, page);
 		if (++i < nr) {
 			xas_next(&xas);
 			goto next;
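
The writer side of the same convention, sketched for illustration only (the loop shape mirrors shmem_add_to_page_cache() above; the commentary is mine, not from the patch):

/*
 * When a compound page is inserted, every one of its nr slots now
 * receives the head page rather than page + i:
 *
 *	i_pages[index + 0]      = page
 *	i_pages[index + 1]      = page
 *	...
 *	i_pages[index + nr - 1] = page
 *
 * The khugepaged, migration and swap-cache hunks make the same change;
 * readers recover the precise subpage via find_subpage().
 */
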
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f844af5f09ba..8e7ce9a9bc5e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -133,7 +133,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 		for (i = 0; i < nr; i++) {
 			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
 			set_page_private(page + i, entry.val + i);
-			xas_store(&xas, page + i);
+			xas_store(&xas, page);
 			xas_next(&xas);
 		}
 		address_space->nrpages += nr;
@@ -168,7 +168,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 
 	for (i = 0; i < nr; i++) {
 		void *entry = xas_store(&xas, NULL);
-		VM_BUG_ON_PAGE(entry != page + i, entry);
+		VM_BUG_ON_PAGE(entry != page, entry);
 		set_page_private(page + i, 0);
 		xas_next(&xas);
 	}