-rw-r--r--  fs/proc/task_mmu.c        | 42
-rw-r--r--  include/linux/shmem_fs.h  |  2
-rw-r--r--  mm/shmem.c                | 65
3 files changed, 53 insertions(+), 56 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5830b2e129ed..8a03759bda38 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -488,42 +488,16 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 }
 
 #ifdef CONFIG_SHMEM
-static unsigned long smaps_shmem_swap(struct vm_area_struct *vma,
-		unsigned long addr)
-{
-	struct page *page;
-
-	page = find_get_entry(vma->vm_file->f_mapping,
-					linear_page_index(vma, addr));
-	if (!page)
-		return 0;
-
-	if (radix_tree_exceptional_entry(page))
-		return PAGE_SIZE;
-
-	page_cache_release(page);
-	return 0;
-
-}
-
 static int smaps_pte_hole(unsigned long addr, unsigned long end,
 		struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 
-	while (addr < end) {
-		mss->swap += smaps_shmem_swap(walk->vma, addr);
-		addr += PAGE_SIZE;
-	}
+	mss->swap += shmem_partial_swap_usage(
+			walk->vma->vm_file->f_mapping, addr, end);
 
 	return 0;
 }
-#else
-static unsigned long smaps_shmem_swap(struct vm_area_struct *vma,
-		unsigned long addr)
-{
-	return 0;
-}
 #endif
 
 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
@@ -555,7 +529,17 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 		page = migration_entry_to_page(swpent);
 	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 							&& pte_none(*pte))) {
-		mss->swap += smaps_shmem_swap(vma, addr);
+		page = find_get_entry(vma->vm_file->f_mapping,
+						linear_page_index(vma, addr));
+		if (!page)
+			return;
+
+		if (radix_tree_exceptional_entry(page))
+			mss->swap += PAGE_SIZE;
+		else
+			page_cache_release(page);
+
+		return;
 	}
 
 	if (!page)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index bd58be5e7a2a..a43f41cb3c43 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -61,6 +61,8 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
 
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
+						pgoff_t start, pgoff_t end);
 
 static inline struct page *shmem_read_mapping_page(
 					struct address_space *mapping, pgoff_t index)
diff --git a/mm/shmem.c b/mm/shmem.c
index e978621de1ef..760d90cf2a41 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -361,41 +361,18 @@ static int shmem_free_swap(struct address_space *mapping,
 
 /*
  * Determine (in bytes) how many of the shmem object's pages mapped by the
- * given vma is swapped out.
+ * given offsets are swapped out.
  *
  * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
-unsigned long shmem_swap_usage(struct vm_area_struct *vma)
+unsigned long shmem_partial_swap_usage(struct address_space *mapping,
+						pgoff_t start, pgoff_t end)
 {
-	struct inode *inode = file_inode(vma->vm_file);
-	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct address_space *mapping = inode->i_mapping;
-	unsigned long swapped;
-	pgoff_t start, end;
 	struct radix_tree_iter iter;
 	void **slot;
 	struct page *page;
-
-	/* Be careful as we don't hold info->lock */
-	swapped = READ_ONCE(info->swapped);
-
-	/*
-	 * The easier cases are when the shmem object has nothing in swap, or
-	 * the vma maps it whole. Then we can simply use the stats that we
-	 * already track.
-	 */
-	if (!swapped)
-		return 0;
-
-	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
-		return swapped << PAGE_SHIFT;
-
-	swapped = 0;
-
-	/* Here comes the more involved part */
-	start = linear_page_index(vma, vma->vm_start);
-	end = linear_page_index(vma, vma->vm_end);
+	unsigned long swapped = 0;
 
 	rcu_read_lock();
 
@@ -430,6 +407,40 @@ restart:
 }
 
 /*
+ * Determine (in bytes) how many of the shmem object's pages mapped by the
+ * given vma is swapped out.
+ *
+ * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
+ * as long as the inode doesn't go away and racy results are not a problem.
+ */
+unsigned long shmem_swap_usage(struct vm_area_struct *vma)
+{
+	struct inode *inode = file_inode(vma->vm_file);
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct address_space *mapping = inode->i_mapping;
+	unsigned long swapped;
+
+	/* Be careful as we don't hold info->lock */
+	swapped = READ_ONCE(info->swapped);
+
+	/*
+	 * The easier cases are when the shmem object has nothing in swap, or
+	 * the vma maps it whole. Then we can simply use the stats that we
+	 * already track.
+	 */
+	if (!swapped)
+		return 0;
+
+	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
+		return swapped << PAGE_SHIFT;
+
+	/* Here comes the more involved part */
+	return shmem_partial_swap_usage(mapping,
+			linear_page_index(vma, vma->vm_start),
+			linear_page_index(vma, vma->vm_end));
+}
+
+/*
  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
  */
 void shmem_unlock_mapping(struct address_space *mapping)