path: root/mm/shmem.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 19:10:23 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-24 19:10:23 -0400
commit		9c9fa97a8edbc3668dfc7a25de516e80c146e86f
tree		2dc0e90203796a4b346ce190f9521c3294104058 /mm/shmem.c
parent		5184d449600f501a8688069f35c138c6b3bf8b94
parent		2b38d01b4de8b1bbda7f5f7e91252609557635fc
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few hot fixes

 - ocfs2 updates

 - almost all of -mm (slab-generic, slab, slub, kmemleak, kasan,
   cleanups, debug, pagecache, memcg, gup, pagemap, memory-hotplug,
   sparsemem, vmalloc, initialization, z3fold, compaction, mempolicy,
   oom-kill, hugetlb, migration, thp, mmap, madvise, shmem, zswap,
   zsmalloc)

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (132 commits)
  mm/zsmalloc.c: fix a -Wunused-function warning
  zswap: do not map same object twice
  zswap: use movable memory if zpool support allocate movable memory
  zpool: add malloc_support_movable to zpool_driver
  shmem: fix obsolete comment in shmem_getpage_gfp()
  mm/madvise: reduce code duplication in error handling paths
  mm: mmap: increase sockets maximum memory size pgoff for 32bits
  mm/mmap.c: refine find_vma_prev() with rb_last()
  riscv: make mmap allocation top-down by default
  mips: use generic mmap top-down layout and brk randomization
  mips: replace arch specific way to determine 32bit task with generic version
  mips: adjust brk randomization offset to fit generic version
  mips: use STACK_TOP when computing mmap base address
  mips: properly account for stack randomization and stack guard gap
  arm: use generic mmap top-down layout and brk randomization
  arm: use STACK_TOP when computing mmap base address
  arm: properly account for stack randomization and stack guard gap
  arm64, mm: make randomization selected by generic topdown mmap layout
  arm64, mm: move generic mmap layout functions to mm
  arm64: consider stack randomization for mmap base only when necessary
  ...
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0f7fd4a85db6..30ce722c23fa 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -609,7 +609,7 @@ static int shmem_add_to_page_cache(struct page *page,
 {
 	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
 	unsigned long i = 0;
-	unsigned long nr = 1UL << compound_order(page);
+	unsigned long nr = compound_nr(page);
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
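The change above is the pattern repeated throughout this diff: the open-coded
"1UL << compound_order(page)" becomes compound_nr(page). For reference, a
minimal sketch of the helper, assuming it matches the form it had when
introduced in include/linux/mm.h (a named wrapper around the same shift):

static inline unsigned long compound_nr(struct page *page)
{
	/* Number of base pages spanned by this (possibly compound) page. */
	return 1UL << compound_order(page);
}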
@@ -631,7 +631,7 @@ static int shmem_add_to_page_cache(struct page *page,
 		if (xas_error(&xas))
 			goto unlock;
 next:
-		xas_store(&xas, page + i);
+		xas_store(&xas, page);
 		if (++i < nr) {
 			xas_next(&xas);
 			goto next;
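Unlike the purely mechanical hunks, this one changes what is stored: each
XArray slot in the range now holds the head page (xas_store(&xas, page))
rather than the corresponding tail page (page + i). Lookups are then expected
to resolve the subpage from the head page and the index; a hedged sketch of
that lookup side, modelled on the find_subpage() helper in
include/linux/pagemap.h of this era (an assumption about surrounding code,
not part of this diff):

static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* Low index bits select the subpage within the compound page. */
	return head + (index & (compound_nr(head) - 1));
}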
@@ -1734,7 +1734,7 @@ unlock:
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache.
  *
- * fault_mm and fault_type are only supplied by shmem_fault:
+ * vmf and fault_type are only supplied by shmem_fault:
  * otherwise they are NULL.
  */
 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
@@ -1884,7 +1884,7 @@ alloc_nohuge:
 		lru_cache_add_anon(page);
 
 		spin_lock_irq(&info->lock);
-		info->alloced += 1 << compound_order(page);
+		info->alloced += compound_nr(page);
 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 		shmem_recalc_inode(inode);
 		spin_unlock_irq(&info->lock);
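Note the mixed units in this hunk: info->alloced counts pages, so the plain
page count from compound_nr() is the right replacement, while inode->i_blocks
counts 512-byte blocks (BLOCKS_PER_PAGE of them per page) and therefore keeps
the explicit shift. Spelled out as a sketch, using only identifiers visible
above:

	unsigned long pages  = compound_nr(page);	/* == 1UL << compound_order(page) */
	unsigned long blocks = BLOCKS_PER_PAGE << compound_order(page);	/* == pages * BLOCKS_PER_PAGE */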
@@ -1925,7 +1925,7 @@ clear:
 		struct page *head = compound_head(page);
 		int i;
 
-		for (i = 0; i < (1 << compound_order(head)); i++) {
+		for (i = 0; i < compound_nr(head); i++) {
 			clear_highpage(head + i);
 			flush_dcache_page(head + i);
 		}
@@ -1952,7 +1952,7 @@ clear:
  * Error recovery.
  */
 unacct:
-	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
+	shmem_inode_unacct_blocks(inode, compound_nr(page));
 
 	if (PageTransHuge(page)) {
 		unlock_page(page);