diff options
author | Hugh Dickins <hughd@google.com> | 2011-08-03 19:21:21 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-08-03 20:25:23 -0400 |
commit | 41ffe5d5ceef7f7ff2ff18e320d88ca6d629efaf (patch) | |
tree | 66ce800fb7911ed037aa574f46729646ce485d0b /mm | |
parent | 285b2c4fdd69ea73b4762785d8c6be83b6c074a6 (diff) |
tmpfs: miscellaneous trivial cleanups
While it's at its least, make a number of boring nitpicky cleanups to
shmem.c, mostly for consistency of variable naming. Things like "swap"
instead of "entry", "pgoff_t index" instead of "unsigned long idx".
And since everything else here is prefixed "shmem_", better change
init_tmpfs() to shmem_init().
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/shmem.c | 216 |
1 file changed, 107 insertions, 109 deletions
diff --git a/mm/shmem.c b/mm/shmem.c index 5574b00ca77..24e95ac1605 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/file.h> | 28 | #include <linux/file.h> |
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/percpu_counter.h> | ||
32 | #include <linux/swap.h> | 31 | #include <linux/swap.h> |
33 | 32 | ||
34 | static struct vfsmount *shm_mnt; | 33 | static struct vfsmount *shm_mnt; |
@@ -51,6 +50,7 @@ static struct vfsmount *shm_mnt; | |||
51 | #include <linux/shmem_fs.h> | 50 | #include <linux/shmem_fs.h> |
52 | #include <linux/writeback.h> | 51 | #include <linux/writeback.h> |
53 | #include <linux/blkdev.h> | 52 | #include <linux/blkdev.h> |
53 | #include <linux/percpu_counter.h> | ||
54 | #include <linux/splice.h> | 54 | #include <linux/splice.h> |
55 | #include <linux/security.h> | 55 | #include <linux/security.h> |
56 | #include <linux/swapops.h> | 56 | #include <linux/swapops.h> |
@@ -63,7 +63,6 @@ static struct vfsmount *shm_mnt; | |||
63 | #include <linux/magic.h> | 63 | #include <linux/magic.h> |
64 | 64 | ||
65 | #include <asm/uaccess.h> | 65 | #include <asm/uaccess.h> |
66 | #include <asm/div64.h> | ||
67 | #include <asm/pgtable.h> | 66 | #include <asm/pgtable.h> |
68 | 67 | ||
69 | #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) | 68 | #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) |
@@ -201,7 +200,7 @@ static void shmem_free_inode(struct super_block *sb) | |||
201 | } | 200 | } |
202 | 201 | ||
203 | /** | 202 | /** |
204 | * shmem_recalc_inode - recalculate the size of an inode | 203 | * shmem_recalc_inode - recalculate the block usage of an inode |
205 | * @inode: inode to recalc | 204 | * @inode: inode to recalc |
206 | * | 205 | * |
207 | * We have to calculate the free blocks since the mm can drop | 206 | * We have to calculate the free blocks since the mm can drop |
@@ -356,19 +355,20 @@ static void shmem_evict_inode(struct inode *inode) | |||
356 | end_writeback(inode); | 355 | end_writeback(inode); |
357 | } | 356 | } |
358 | 357 | ||
359 | static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page) | 358 | static int shmem_unuse_inode(struct shmem_inode_info *info, |
359 | swp_entry_t swap, struct page *page) | ||
360 | { | 360 | { |
361 | struct address_space *mapping = info->vfs_inode.i_mapping; | 361 | struct address_space *mapping = info->vfs_inode.i_mapping; |
362 | unsigned long idx; | 362 | pgoff_t index; |
363 | int error; | 363 | int error; |
364 | 364 | ||
365 | for (idx = 0; idx < SHMEM_NR_DIRECT; idx++) | 365 | for (index = 0; index < SHMEM_NR_DIRECT; index++) |
366 | if (shmem_get_swap(info, idx).val == entry.val) | 366 | if (shmem_get_swap(info, index).val == swap.val) |
367 | goto found; | 367 | goto found; |
368 | return 0; | 368 | return 0; |
369 | found: | 369 | found: |
370 | spin_lock(&info->lock); | 370 | spin_lock(&info->lock); |
371 | if (shmem_get_swap(info, idx).val != entry.val) { | 371 | if (shmem_get_swap(info, index).val != swap.val) { |
372 | spin_unlock(&info->lock); | 372 | spin_unlock(&info->lock); |
373 | return 0; | 373 | return 0; |
374 | } | 374 | } |
@@ -387,15 +387,15 @@ found: | |||
387 | * but also to hold up shmem_evict_inode(): so inode cannot be freed | 387 | * but also to hold up shmem_evict_inode(): so inode cannot be freed |
388 | * beneath us (pagelock doesn't help until the page is in pagecache). | 388 | * beneath us (pagelock doesn't help until the page is in pagecache). |
389 | */ | 389 | */ |
390 | error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT); | 390 | error = add_to_page_cache_locked(page, mapping, index, GFP_NOWAIT); |
391 | /* which does mem_cgroup_uncharge_cache_page on error */ | 391 | /* which does mem_cgroup_uncharge_cache_page on error */ |
392 | 392 | ||
393 | if (error != -ENOMEM) { | 393 | if (error != -ENOMEM) { |
394 | delete_from_swap_cache(page); | 394 | delete_from_swap_cache(page); |
395 | set_page_dirty(page); | 395 | set_page_dirty(page); |
396 | shmem_put_swap(info, idx, (swp_entry_t){0}); | 396 | shmem_put_swap(info, index, (swp_entry_t){0}); |
397 | info->swapped--; | 397 | info->swapped--; |
398 | swap_free(entry); | 398 | swap_free(swap); |
399 | error = 1; /* not an error, but entry was found */ | 399 | error = 1; /* not an error, but entry was found */ |
400 | } | 400 | } |
401 | spin_unlock(&info->lock); | 401 | spin_unlock(&info->lock); |
@@ -405,9 +405,9 @@ found: | |||
405 | /* | 405 | /* |
406 | * shmem_unuse() search for an eventually swapped out shmem page. | 406 | * shmem_unuse() search for an eventually swapped out shmem page. |
407 | */ | 407 | */ |
408 | int shmem_unuse(swp_entry_t entry, struct page *page) | 408 | int shmem_unuse(swp_entry_t swap, struct page *page) |
409 | { | 409 | { |
410 | struct list_head *p, *next; | 410 | struct list_head *this, *next; |
411 | struct shmem_inode_info *info; | 411 | struct shmem_inode_info *info; |
412 | int found = 0; | 412 | int found = 0; |
413 | int error; | 413 | int error; |
@@ -432,8 +432,8 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
432 | radix_tree_preload_end(); | 432 | radix_tree_preload_end(); |
433 | 433 | ||
434 | mutex_lock(&shmem_swaplist_mutex); | 434 | mutex_lock(&shmem_swaplist_mutex); |
435 | list_for_each_safe(p, next, &shmem_swaplist) { | 435 | list_for_each_safe(this, next, &shmem_swaplist) { |
436 | info = list_entry(p, struct shmem_inode_info, swaplist); | 436 | info = list_entry(this, struct shmem_inode_info, swaplist); |
437 | if (!info->swapped) { | 437 | if (!info->swapped) { |
438 | spin_lock(&info->lock); | 438 | spin_lock(&info->lock); |
439 | if (!info->swapped) | 439 | if (!info->swapped) |
@@ -441,7 +441,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page) | |||
441 | spin_unlock(&info->lock); | 441 | spin_unlock(&info->lock); |
442 | } | 442 | } |
443 | if (info->swapped) | 443 | if (info->swapped) |
444 | found = shmem_unuse_inode(info, entry, page); | 444 | found = shmem_unuse_inode(info, swap, page); |
445 | cond_resched(); | 445 | cond_resched(); |
446 | if (found) | 446 | if (found) |
447 | break; | 447 | break; |
@@ -467,7 +467,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) | |||
467 | struct shmem_inode_info *info; | 467 | struct shmem_inode_info *info; |
468 | swp_entry_t swap, oswap; | 468 | swp_entry_t swap, oswap; |
469 | struct address_space *mapping; | 469 | struct address_space *mapping; |
470 | unsigned long index; | 470 | pgoff_t index; |
471 | struct inode *inode; | 471 | struct inode *inode; |
472 | 472 | ||
473 | BUG_ON(!PageLocked(page)); | 473 | BUG_ON(!PageLocked(page)); |
@@ -577,35 +577,33 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) | |||
577 | } | 577 | } |
578 | #endif /* CONFIG_TMPFS */ | 578 | #endif /* CONFIG_TMPFS */ |
579 | 579 | ||
580 | static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, | 580 | static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, |
581 | struct shmem_inode_info *info, unsigned long idx) | 581 | struct shmem_inode_info *info, pgoff_t index) |
582 | { | 582 | { |
583 | struct mempolicy mpol, *spol; | 583 | struct mempolicy mpol, *spol; |
584 | struct vm_area_struct pvma; | 584 | struct vm_area_struct pvma; |
585 | struct page *page; | ||
586 | 585 | ||
587 | spol = mpol_cond_copy(&mpol, | 586 | spol = mpol_cond_copy(&mpol, |
588 | mpol_shared_policy_lookup(&info->policy, idx)); | 587 | mpol_shared_policy_lookup(&info->policy, index)); |
589 | 588 | ||
590 | /* Create a pseudo vma that just contains the policy */ | 589 | /* Create a pseudo vma that just contains the policy */ |
591 | pvma.vm_start = 0; | 590 | pvma.vm_start = 0; |
592 | pvma.vm_pgoff = idx; | 591 | pvma.vm_pgoff = index; |
593 | pvma.vm_ops = NULL; | 592 | pvma.vm_ops = NULL; |
594 | pvma.vm_policy = spol; | 593 | pvma.vm_policy = spol; |
595 | page = swapin_readahead(entry, gfp, &pvma, 0); | 594 | return swapin_readahead(swap, gfp, &pvma, 0); |
596 | return page; | ||
597 | } | 595 | } |
598 | 596 | ||
599 | static struct page *shmem_alloc_page(gfp_t gfp, | 597 | static struct page *shmem_alloc_page(gfp_t gfp, |
600 | struct shmem_inode_info *info, unsigned long idx) | 598 | struct shmem_inode_info *info, pgoff_t index) |
601 | { | 599 | { |
602 | struct vm_area_struct pvma; | 600 | struct vm_area_struct pvma; |
603 | 601 | ||
604 | /* Create a pseudo vma that just contains the policy */ | 602 | /* Create a pseudo vma that just contains the policy */ |
605 | pvma.vm_start = 0; | 603 | pvma.vm_start = 0; |
606 | pvma.vm_pgoff = idx; | 604 | pvma.vm_pgoff = index; |
607 | pvma.vm_ops = NULL; | 605 | pvma.vm_ops = NULL; |
608 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); | 606 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); |
609 | 607 | ||
610 | /* | 608 | /* |
611 | * alloc_page_vma() will drop the shared policy reference | 609 | * alloc_page_vma() will drop the shared policy reference |
@@ -614,19 +612,19 @@ static struct page *shmem_alloc_page(gfp_t gfp, | |||
614 | } | 612 | } |
615 | #else /* !CONFIG_NUMA */ | 613 | #else /* !CONFIG_NUMA */ |
616 | #ifdef CONFIG_TMPFS | 614 | #ifdef CONFIG_TMPFS |
617 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) | 615 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
618 | { | 616 | { |
619 | } | 617 | } |
620 | #endif /* CONFIG_TMPFS */ | 618 | #endif /* CONFIG_TMPFS */ |
621 | 619 | ||
622 | static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, | 620 | static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, |
623 | struct shmem_inode_info *info, unsigned long idx) | 621 | struct shmem_inode_info *info, pgoff_t index) |
624 | { | 622 | { |
625 | return swapin_readahead(entry, gfp, NULL, 0); | 623 | return swapin_readahead(swap, gfp, NULL, 0); |
626 | } | 624 | } |
627 | 625 | ||
628 | static inline struct page *shmem_alloc_page(gfp_t gfp, | 626 | static inline struct page *shmem_alloc_page(gfp_t gfp, |
629 | struct shmem_inode_info *info, unsigned long idx) | 627 | struct shmem_inode_info *info, pgoff_t index) |
630 | { | 628 | { |
631 | return alloc_page(gfp); | 629 | return alloc_page(gfp); |
632 | } | 630 | } |
@@ -646,7 +644,7 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) | |||
646 | * vm. If we swap it in we mark it dirty since we also free the swap | 644 | * vm. If we swap it in we mark it dirty since we also free the swap |
647 | * entry since a page cannot live in both the swap and page cache | 645 | * entry since a page cannot live in both the swap and page cache |
648 | */ | 646 | */ |
649 | static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx, | 647 | static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, |
650 | struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) | 648 | struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) |
651 | { | 649 | { |
652 | struct address_space *mapping = inode->i_mapping; | 650 | struct address_space *mapping = inode->i_mapping; |
@@ -657,10 +655,10 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx, | |||
657 | swp_entry_t swap; | 655 | swp_entry_t swap; |
658 | int error; | 656 | int error; |
659 | 657 | ||
660 | if (idx > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) | 658 | if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) |
661 | return -EFBIG; | 659 | return -EFBIG; |
662 | repeat: | 660 | repeat: |
663 | page = find_lock_page(mapping, idx); | 661 | page = find_lock_page(mapping, index); |
664 | if (page) { | 662 | if (page) { |
665 | /* | 663 | /* |
666 | * Once we can get the page lock, it must be uptodate: | 664 | * Once we can get the page lock, it must be uptodate: |
@@ -681,7 +679,7 @@ repeat: | |||
681 | radix_tree_preload_end(); | 679 | radix_tree_preload_end(); |
682 | 680 | ||
683 | if (sgp != SGP_READ && !prealloc_page) { | 681 | if (sgp != SGP_READ && !prealloc_page) { |
684 | prealloc_page = shmem_alloc_page(gfp, info, idx); | 682 | prealloc_page = shmem_alloc_page(gfp, info, index); |
685 | if (prealloc_page) { | 683 | if (prealloc_page) { |
686 | SetPageSwapBacked(prealloc_page); | 684 | SetPageSwapBacked(prealloc_page); |
687 | if (mem_cgroup_cache_charge(prealloc_page, | 685 | if (mem_cgroup_cache_charge(prealloc_page, |
@@ -694,7 +692,7 @@ repeat: | |||
694 | 692 | ||
695 | spin_lock(&info->lock); | 693 | spin_lock(&info->lock); |
696 | shmem_recalc_inode(inode); | 694 | shmem_recalc_inode(inode); |
697 | swap = shmem_get_swap(info, idx); | 695 | swap = shmem_get_swap(info, index); |
698 | if (swap.val) { | 696 | if (swap.val) { |
699 | /* Look it up and read it in.. */ | 697 | /* Look it up and read it in.. */ |
700 | page = lookup_swap_cache(swap); | 698 | page = lookup_swap_cache(swap); |
@@ -703,9 +701,9 @@ repeat: | |||
703 | /* here we actually do the io */ | 701 | /* here we actually do the io */ |
704 | if (fault_type) | 702 | if (fault_type) |
705 | *fault_type |= VM_FAULT_MAJOR; | 703 | *fault_type |= VM_FAULT_MAJOR; |
706 | page = shmem_swapin(swap, gfp, info, idx); | 704 | page = shmem_swapin(swap, gfp, info, index); |
707 | if (!page) { | 705 | if (!page) { |
708 | swp_entry_t nswap = shmem_get_swap(info, idx); | 706 | swp_entry_t nswap = shmem_get_swap(info, index); |
709 | if (nswap.val == swap.val) { | 707 | if (nswap.val == swap.val) { |
710 | error = -ENOMEM; | 708 | error = -ENOMEM; |
711 | goto out; | 709 | goto out; |
@@ -740,7 +738,7 @@ repeat: | |||
740 | } | 738 | } |
741 | 739 | ||
742 | error = add_to_page_cache_locked(page, mapping, | 740 | error = add_to_page_cache_locked(page, mapping, |
743 | idx, GFP_NOWAIT); | 741 | index, GFP_NOWAIT); |
744 | if (error) { | 742 | if (error) { |
745 | spin_unlock(&info->lock); | 743 | spin_unlock(&info->lock); |
746 | if (error == -ENOMEM) { | 744 | if (error == -ENOMEM) { |
@@ -762,14 +760,14 @@ repeat: | |||
762 | } | 760 | } |
763 | 761 | ||
764 | delete_from_swap_cache(page); | 762 | delete_from_swap_cache(page); |
765 | shmem_put_swap(info, idx, (swp_entry_t){0}); | 763 | shmem_put_swap(info, index, (swp_entry_t){0}); |
766 | info->swapped--; | 764 | info->swapped--; |
767 | spin_unlock(&info->lock); | 765 | spin_unlock(&info->lock); |
768 | set_page_dirty(page); | 766 | set_page_dirty(page); |
769 | swap_free(swap); | 767 | swap_free(swap); |
770 | 768 | ||
771 | } else if (sgp == SGP_READ) { | 769 | } else if (sgp == SGP_READ) { |
772 | page = find_get_page(mapping, idx); | 770 | page = find_get_page(mapping, index); |
773 | if (page && !trylock_page(page)) { | 771 | if (page && !trylock_page(page)) { |
774 | spin_unlock(&info->lock); | 772 | spin_unlock(&info->lock); |
775 | wait_on_page_locked(page); | 773 | wait_on_page_locked(page); |
@@ -793,12 +791,12 @@ repeat: | |||
793 | page = prealloc_page; | 791 | page = prealloc_page; |
794 | prealloc_page = NULL; | 792 | prealloc_page = NULL; |
795 | 793 | ||
796 | swap = shmem_get_swap(info, idx); | 794 | swap = shmem_get_swap(info, index); |
797 | if (swap.val) | 795 | if (swap.val) |
798 | mem_cgroup_uncharge_cache_page(page); | 796 | mem_cgroup_uncharge_cache_page(page); |
799 | else | 797 | else |
800 | error = add_to_page_cache_lru(page, mapping, | 798 | error = add_to_page_cache_lru(page, mapping, |
801 | idx, GFP_NOWAIT); | 799 | index, GFP_NOWAIT); |
802 | /* | 800 | /* |
803 | * At add_to_page_cache_lru() failure, | 801 | * At add_to_page_cache_lru() failure, |
804 | * uncharge will be done automatically. | 802 | * uncharge will be done automatically. |
@@ -841,7 +839,7 @@ nospace: | |||
841 | * but must also avoid reporting a spurious ENOSPC while working on a | 839 | * but must also avoid reporting a spurious ENOSPC while working on a |
842 | * full tmpfs. | 840 | * full tmpfs. |
843 | */ | 841 | */ |
844 | page = find_get_page(mapping, idx); | 842 | page = find_get_page(mapping, index); |
845 | spin_unlock(&info->lock); | 843 | spin_unlock(&info->lock); |
846 | if (page) { | 844 | if (page) { |
847 | page_cache_release(page); | 845 | page_cache_release(page); |
@@ -872,20 +870,20 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
872 | } | 870 | } |
873 | 871 | ||
874 | #ifdef CONFIG_NUMA | 872 | #ifdef CONFIG_NUMA |
875 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) | 873 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) |
876 | { | 874 | { |
877 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | 875 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
878 | return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); | 876 | return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); |
879 | } | 877 | } |
880 | 878 | ||
881 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, | 879 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, |
882 | unsigned long addr) | 880 | unsigned long addr) |
883 | { | 881 | { |
884 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | 882 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
885 | unsigned long idx; | 883 | pgoff_t index; |
886 | 884 | ||
887 | idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 885 | index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
888 | return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); | 886 | return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); |
889 | } | 887 | } |
890 | #endif | 888 | #endif |
891 | 889 | ||
@@ -1016,7 +1014,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ | |||
1016 | { | 1014 | { |
1017 | struct inode *inode = filp->f_path.dentry->d_inode; | 1015 | struct inode *inode = filp->f_path.dentry->d_inode; |
1018 | struct address_space *mapping = inode->i_mapping; | 1016 | struct address_space *mapping = inode->i_mapping; |
1019 | unsigned long index, offset; | 1017 | pgoff_t index; |
1018 | unsigned long offset; | ||
1020 | enum sgp_type sgp = SGP_READ; | 1019 | enum sgp_type sgp = SGP_READ; |
1021 | 1020 | ||
1022 | /* | 1021 | /* |
@@ -1032,7 +1031,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_ | |||
1032 | 1031 | ||
1033 | for (;;) { | 1032 | for (;;) { |
1034 | struct page *page = NULL; | 1033 | struct page *page = NULL; |
1035 | unsigned long end_index, nr, ret; | 1034 | pgoff_t end_index; |
1035 | unsigned long nr, ret; | ||
1036 | loff_t i_size = i_size_read(inode); | 1036 | loff_t i_size = i_size_read(inode); |
1037 | 1037 | ||
1038 | end_index = i_size >> PAGE_CACHE_SHIFT; | 1038 | end_index = i_size >> PAGE_CACHE_SHIFT; |
@@ -1270,8 +1270,9 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
1270 | buf->f_namelen = NAME_MAX; | 1270 | buf->f_namelen = NAME_MAX; |
1271 | if (sbinfo->max_blocks) { | 1271 | if (sbinfo->max_blocks) { |
1272 | buf->f_blocks = sbinfo->max_blocks; | 1272 | buf->f_blocks = sbinfo->max_blocks; |
1273 | buf->f_bavail = buf->f_bfree = | 1273 | buf->f_bavail = |
1274 | sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks); | 1274 | buf->f_bfree = sbinfo->max_blocks - |
1275 | percpu_counter_sum(&sbinfo->used_blocks); | ||
1275 | } | 1276 | } |
1276 | if (sbinfo->max_inodes) { | 1277 | if (sbinfo->max_inodes) { |
1277 | buf->f_files = sbinfo->max_inodes; | 1278 | buf->f_files = sbinfo->max_inodes; |
@@ -1480,8 +1481,8 @@ static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *n | |||
1480 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) | 1481 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) |
1481 | { | 1482 | { |
1482 | struct page *page = NULL; | 1483 | struct page *page = NULL; |
1483 | int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); | 1484 | int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); |
1484 | nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); | 1485 | nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); |
1485 | if (page) | 1486 | if (page) |
1486 | unlock_page(page); | 1487 | unlock_page(page); |
1487 | return page; | 1488 | return page; |
@@ -1592,7 +1593,6 @@ out: | |||
1592 | return err; | 1593 | return err; |
1593 | } | 1594 | } |
1594 | 1595 | ||
1595 | |||
1596 | static const struct xattr_handler *shmem_xattr_handlers[] = { | 1596 | static const struct xattr_handler *shmem_xattr_handlers[] = { |
1597 | #ifdef CONFIG_TMPFS_POSIX_ACL | 1597 | #ifdef CONFIG_TMPFS_POSIX_ACL |
1598 | &generic_acl_access_handler, | 1598 | &generic_acl_access_handler, |
@@ -2052,14 +2052,14 @@ static struct kmem_cache *shmem_inode_cachep; | |||
2052 | 2052 | ||
2053 | static struct inode *shmem_alloc_inode(struct super_block *sb) | 2053 | static struct inode *shmem_alloc_inode(struct super_block *sb) |
2054 | { | 2054 | { |
2055 | struct shmem_inode_info *p; | 2055 | struct shmem_inode_info *info; |
2056 | p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); | 2056 | info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); |
2057 | if (!p) | 2057 | if (!info) |
2058 | return NULL; | 2058 | return NULL; |
2059 | return &p->vfs_inode; | 2059 | return &info->vfs_inode; |
2060 | } | 2060 | } |
2061 | 2061 | ||
2062 | static void shmem_i_callback(struct rcu_head *head) | 2062 | static void shmem_destroy_callback(struct rcu_head *head) |
2063 | { | 2063 | { |
2064 | struct inode *inode = container_of(head, struct inode, i_rcu); | 2064 | struct inode *inode = container_of(head, struct inode, i_rcu); |
2065 | INIT_LIST_HEAD(&inode->i_dentry); | 2065 | INIT_LIST_HEAD(&inode->i_dentry); |
@@ -2072,25 +2072,24 @@ static void shmem_destroy_inode(struct inode *inode) | |||
2072 | /* only struct inode is valid if it's an inline symlink */ | 2072 | /* only struct inode is valid if it's an inline symlink */ |
2073 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); | 2073 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); |
2074 | } | 2074 | } |
2075 | call_rcu(&inode->i_rcu, shmem_i_callback); | 2075 | call_rcu(&inode->i_rcu, shmem_destroy_callback); |
2076 | } | 2076 | } |
2077 | 2077 | ||
2078 | static void init_once(void *foo) | 2078 | static void shmem_init_inode(void *foo) |
2079 | { | 2079 | { |
2080 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; | 2080 | struct shmem_inode_info *info = foo; |
2081 | 2081 | inode_init_once(&info->vfs_inode); | |
2082 | inode_init_once(&p->vfs_inode); | ||
2083 | } | 2082 | } |
2084 | 2083 | ||
2085 | static int init_inodecache(void) | 2084 | static int shmem_init_inodecache(void) |
2086 | { | 2085 | { |
2087 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", | 2086 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", |
2088 | sizeof(struct shmem_inode_info), | 2087 | sizeof(struct shmem_inode_info), |
2089 | 0, SLAB_PANIC, init_once); | 2088 | 0, SLAB_PANIC, shmem_init_inode); |
2090 | return 0; | 2089 | return 0; |
2091 | } | 2090 | } |
2092 | 2091 | ||
2093 | static void destroy_inodecache(void) | 2092 | static void shmem_destroy_inodecache(void) |
2094 | { | 2093 | { |
2095 | kmem_cache_destroy(shmem_inode_cachep); | 2094 | kmem_cache_destroy(shmem_inode_cachep); |
2096 | } | 2095 | } |
@@ -2187,21 +2186,20 @@ static const struct vm_operations_struct shmem_vm_ops = { | |||
2187 | #endif | 2186 | #endif |
2188 | }; | 2187 | }; |
2189 | 2188 | ||
2190 | |||
2191 | static struct dentry *shmem_mount(struct file_system_type *fs_type, | 2189 | static struct dentry *shmem_mount(struct file_system_type *fs_type, |
2192 | int flags, const char *dev_name, void *data) | 2190 | int flags, const char *dev_name, void *data) |
2193 | { | 2191 | { |
2194 | return mount_nodev(fs_type, flags, data, shmem_fill_super); | 2192 | return mount_nodev(fs_type, flags, data, shmem_fill_super); |
2195 | } | 2193 | } |
2196 | 2194 | ||
2197 | static struct file_system_type tmpfs_fs_type = { | 2195 | static struct file_system_type shmem_fs_type = { |
2198 | .owner = THIS_MODULE, | 2196 | .owner = THIS_MODULE, |
2199 | .name = "tmpfs", | 2197 | .name = "tmpfs", |
2200 | .mount = shmem_mount, | 2198 | .mount = shmem_mount, |
2201 | .kill_sb = kill_litter_super, | 2199 | .kill_sb = kill_litter_super, |
2202 | }; | 2200 | }; |
2203 | 2201 | ||
2204 | int __init init_tmpfs(void) | 2202 | int __init shmem_init(void) |
2205 | { | 2203 | { |
2206 | int error; | 2204 | int error; |
2207 | 2205 | ||
@@ -2209,18 +2207,18 @@ int __init init_tmpfs(void) | |||
2209 | if (error) | 2207 | if (error) |
2210 | goto out4; | 2208 | goto out4; |
2211 | 2209 | ||
2212 | error = init_inodecache(); | 2210 | error = shmem_init_inodecache(); |
2213 | if (error) | 2211 | if (error) |
2214 | goto out3; | 2212 | goto out3; |
2215 | 2213 | ||
2216 | error = register_filesystem(&tmpfs_fs_type); | 2214 | error = register_filesystem(&shmem_fs_type); |
2217 | if (error) { | 2215 | if (error) { |
2218 | printk(KERN_ERR "Could not register tmpfs\n"); | 2216 | printk(KERN_ERR "Could not register tmpfs\n"); |
2219 | goto out2; | 2217 | goto out2; |
2220 | } | 2218 | } |
2221 | 2219 | ||
2222 | shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, | 2220 | shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER, |
2223 | tmpfs_fs_type.name, NULL); | 2221 | shmem_fs_type.name, NULL); |
2224 | if (IS_ERR(shm_mnt)) { | 2222 | if (IS_ERR(shm_mnt)) { |
2225 | error = PTR_ERR(shm_mnt); | 2223 | error = PTR_ERR(shm_mnt); |
2226 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); | 2224 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); |
@@ -2229,9 +2227,9 @@ int __init init_tmpfs(void) | |||
2229 | return 0; | 2227 | return 0; |
2230 | 2228 | ||
2231 | out1: | 2229 | out1: |
2232 | unregister_filesystem(&tmpfs_fs_type); | 2230 | unregister_filesystem(&shmem_fs_type); |
2233 | out2: | 2231 | out2: |
2234 | destroy_inodecache(); | 2232 | shmem_destroy_inodecache(); |
2235 | out3: | 2233 | out3: |
2236 | bdi_destroy(&shmem_backing_dev_info); | 2234 | bdi_destroy(&shmem_backing_dev_info); |
2237 | out4: | 2235 | out4: |
@@ -2241,37 +2239,37 @@ out4: | |||
2241 | 2239 | ||
2242 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 2240 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
2243 | /** | 2241 | /** |
2244 | * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file | 2242 | * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file |
2245 | * @inode: the inode to be searched | 2243 | * @inode: the inode to be searched |
2246 | * @pgoff: the offset to be searched | 2244 | * @index: the page offset to be searched |
2247 | * @pagep: the pointer for the found page to be stored | 2245 | * @pagep: the pointer for the found page to be stored |
2248 | * @ent: the pointer for the found swap entry to be stored | 2246 | * @swapp: the pointer for the found swap entry to be stored |
2249 | * | 2247 | * |
2250 | * If a page is found, refcount of it is incremented. Callers should handle | 2248 | * If a page is found, refcount of it is incremented. Callers should handle |
2251 | * these refcount. | 2249 | * these refcount. |
2252 | */ | 2250 | */ |
2253 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | 2251 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, |
2254 | struct page **pagep, swp_entry_t *ent) | 2252 | struct page **pagep, swp_entry_t *swapp) |
2255 | { | 2253 | { |
2256 | swp_entry_t entry = { .val = 0 }; | ||
2257 | struct page *page = NULL; | ||
2258 | struct shmem_inode_info *info = SHMEM_I(inode); | 2254 | struct shmem_inode_info *info = SHMEM_I(inode); |
2255 | struct page *page = NULL; | ||
2256 | swp_entry_t swap = {0}; | ||
2259 | 2257 | ||
2260 | if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 2258 | if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
2261 | goto out; | 2259 | goto out; |
2262 | 2260 | ||
2263 | spin_lock(&info->lock); | 2261 | spin_lock(&info->lock); |
2264 | #ifdef CONFIG_SWAP | 2262 | #ifdef CONFIG_SWAP |
2265 | entry = shmem_get_swap(info, pgoff); | 2263 | swap = shmem_get_swap(info, index); |
2266 | if (entry.val) | 2264 | if (swap.val) |
2267 | page = find_get_page(&swapper_space, entry.val); | 2265 | page = find_get_page(&swapper_space, swap.val); |
2268 | else | 2266 | else |
2269 | #endif | 2267 | #endif |
2270 | page = find_get_page(inode->i_mapping, pgoff); | 2268 | page = find_get_page(inode->i_mapping, index); |
2271 | spin_unlock(&info->lock); | 2269 | spin_unlock(&info->lock); |
2272 | out: | 2270 | out: |
2273 | *pagep = page; | 2271 | *pagep = page; |
2274 | *ent = entry; | 2272 | *swapp = swap; |
2275 | } | 2273 | } |
2276 | #endif | 2274 | #endif |
2277 | 2275 | ||
@@ -2288,23 +2286,23 @@ out: | |||
2288 | 2286 | ||
2289 | #include <linux/ramfs.h> | 2287 | #include <linux/ramfs.h> |
2290 | 2288 | ||
2291 | static struct file_system_type tmpfs_fs_type = { | 2289 | static struct file_system_type shmem_fs_type = { |
2292 | .name = "tmpfs", | 2290 | .name = "tmpfs", |
2293 | .mount = ramfs_mount, | 2291 | .mount = ramfs_mount, |
2294 | .kill_sb = kill_litter_super, | 2292 | .kill_sb = kill_litter_super, |
2295 | }; | 2293 | }; |
2296 | 2294 | ||
2297 | int __init init_tmpfs(void) | 2295 | int __init shmem_init(void) |
2298 | { | 2296 | { |
2299 | BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); | 2297 | BUG_ON(register_filesystem(&shmem_fs_type) != 0); |
2300 | 2298 | ||
2301 | shm_mnt = kern_mount(&tmpfs_fs_type); | 2299 | shm_mnt = kern_mount(&shmem_fs_type); |
2302 | BUG_ON(IS_ERR(shm_mnt)); | 2300 | BUG_ON(IS_ERR(shm_mnt)); |
2303 | 2301 | ||
2304 | return 0; | 2302 | return 0; |
2305 | } | 2303 | } |
2306 | 2304 | ||
2307 | int shmem_unuse(swp_entry_t entry, struct page *page) | 2305 | int shmem_unuse(swp_entry_t swap, struct page *page) |
2308 | { | 2306 | { |
2309 | return 0; | 2307 | return 0; |
2310 | } | 2308 | } |
@@ -2314,34 +2312,34 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) | |||
2314 | return 0; | 2312 | return 0; |
2315 | } | 2313 | } |
2316 | 2314 | ||
2317 | void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end) | 2315 | void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) |
2318 | { | 2316 | { |
2319 | truncate_inode_pages_range(inode->i_mapping, start, end); | 2317 | truncate_inode_pages_range(inode->i_mapping, lstart, lend); |
2320 | } | 2318 | } |
2321 | EXPORT_SYMBOL_GPL(shmem_truncate_range); | 2319 | EXPORT_SYMBOL_GPL(shmem_truncate_range); |
2322 | 2320 | ||
2323 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 2321 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
2324 | /** | 2322 | /** |
2325 | * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file | 2323 | * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file |
2326 | * @inode: the inode to be searched | 2324 | * @inode: the inode to be searched |
2327 | * @pgoff: the offset to be searched | 2325 | * @index: the page offset to be searched |
2328 | * @pagep: the pointer for the found page to be stored | 2326 | * @pagep: the pointer for the found page to be stored |
2329 | * @ent: the pointer for the found swap entry to be stored | 2327 | * @swapp: the pointer for the found swap entry to be stored |
2330 | * | 2328 | * |
2331 | * If a page is found, refcount of it is incremented. Callers should handle | 2329 | * If a page is found, refcount of it is incremented. Callers should handle |
2332 | * these refcount. | 2330 | * these refcount. |
2333 | */ | 2331 | */ |
2334 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | 2332 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index, |
2335 | struct page **pagep, swp_entry_t *ent) | 2333 | struct page **pagep, swp_entry_t *swapp) |
2336 | { | 2334 | { |
2337 | struct page *page = NULL; | 2335 | struct page *page = NULL; |
2338 | 2336 | ||
2339 | if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 2337 | if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) |
2340 | goto out; | 2338 | goto out; |
2341 | page = find_get_page(inode->i_mapping, pgoff); | 2339 | page = find_get_page(inode->i_mapping, index); |
2342 | out: | 2340 | out: |
2343 | *pagep = page; | 2341 | *pagep = page; |
2344 | *ent = (swp_entry_t){ .val = 0 }; | 2342 | *swapp = (swp_entry_t){0}; |
2345 | } | 2343 | } |
2346 | #endif | 2344 | #endif |
2347 | 2345 | ||