diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-22 12:04:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-22 12:04:48 -0400 |
commit | 95211279c5ad00a317c98221d7e4365e02f20836 (patch) | |
tree | 2ddc8625378d2915b8c96392f3cf6663b705ed55 /fs | |
parent | 5375871d432ae9fc581014ac117b96aaee3cd0c7 (diff) | |
parent | 12724850e8064f64b6223d26d78c0597c742c65a (diff) |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge first batch of patches from Andrew Morton:
"A few misc things and all the MM queue"
* emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits)
memcg: avoid THP split in task migration
thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE
memcg: clean up existing move charge code
mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read()
mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event()
mm/memcontrol.c: s/stealed/stolen/
memcg: fix performance of mem_cgroup_begin_update_page_stat()
memcg: remove PCG_FILE_MAPPED
memcg: use new logic for page stat accounting
memcg: remove PCG_MOVE_LOCK flag from page_cgroup
memcg: simplify move_account() check
memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat)
memcg: kill dead prev_priority stubs
memcg: remove PCG_CACHE page_cgroup flag
memcg: let css_get_next() rely upon rcu_read_lock()
cgroup: revert ss_id_lock to spinlock
idr: make idr_get_next() good for rcu_read_lock()
memcg: remove unnecessary thp check in page stat accounting
memcg: remove redundant returns
memcg: enum lru_list lru
...
Diffstat (limited to 'fs')
-rw-r--r-- | fs/exec.c | 2 | ||||
-rw-r--r-- | fs/hugetlbfs/inode.c | 138 | ||||
-rw-r--r-- | fs/namei.c | 6 | ||||
-rw-r--r-- | fs/proc/base.c | 12 | ||||
-rw-r--r-- | fs/proc/internal.h | 9 | ||||
-rw-r--r-- | fs/proc/page.c | 2 | ||||
-rw-r--r-- | fs/proc/task_mmu.c | 357 | ||||
-rw-r--r-- | fs/proc/task_nommu.c | 69 | ||||
-rw-r--r-- | fs/seq_file.c | 28 |
9 files changed, 432 insertions(+), 191 deletions(-)
@@ -822,7 +822,7 @@ static int exec_mmap(struct mm_struct *mm) | |||
822 | /* Notify parent that we're no longer interested in the old VM */ | 822 | /* Notify parent that we're no longer interested in the old VM */ |
823 | tsk = current; | 823 | tsk = current; |
824 | old_mm = current->mm; | 824 | old_mm = current->mm; |
825 | sync_mm_rss(tsk, old_mm); | 825 | sync_mm_rss(old_mm); |
826 | mm_release(tsk, old_mm); | 826 | mm_release(tsk, old_mm); |
827 | 827 | ||
828 | if (old_mm) { | 828 | if (old_mm) { |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 81932fa1861a..ea251749d9d5 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -41,6 +41,25 @@ const struct file_operations hugetlbfs_file_operations; | |||
41 | static const struct inode_operations hugetlbfs_dir_inode_operations; | 41 | static const struct inode_operations hugetlbfs_dir_inode_operations; |
42 | static const struct inode_operations hugetlbfs_inode_operations; | 42 | static const struct inode_operations hugetlbfs_inode_operations; |
43 | 43 | ||
44 | struct hugetlbfs_config { | ||
45 | uid_t uid; | ||
46 | gid_t gid; | ||
47 | umode_t mode; | ||
48 | long nr_blocks; | ||
49 | long nr_inodes; | ||
50 | struct hstate *hstate; | ||
51 | }; | ||
52 | |||
53 | struct hugetlbfs_inode_info { | ||
54 | struct shared_policy policy; | ||
55 | struct inode vfs_inode; | ||
56 | }; | ||
57 | |||
58 | static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) | ||
59 | { | ||
60 | return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); | ||
61 | } | ||
62 | |||
44 | static struct backing_dev_info hugetlbfs_backing_dev_info = { | 63 | static struct backing_dev_info hugetlbfs_backing_dev_info = { |
45 | .name = "hugetlbfs", | 64 | .name = "hugetlbfs", |
46 | .ra_pages = 0, /* No readahead */ | 65 | .ra_pages = 0, /* No readahead */ |
@@ -154,10 +173,12 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
154 | return addr; | 173 | return addr; |
155 | } | 174 | } |
156 | 175 | ||
157 | start_addr = mm->free_area_cache; | 176 | if (len > mm->cached_hole_size) |
158 | 177 | start_addr = mm->free_area_cache; | |
159 | if (len <= mm->cached_hole_size) | 178 | else { |
160 | start_addr = TASK_UNMAPPED_BASE; | 179 | start_addr = TASK_UNMAPPED_BASE; |
180 | mm->cached_hole_size = 0; | ||
181 | } | ||
161 | 182 | ||
162 | full_search: | 183 | full_search: |
163 | addr = ALIGN(start_addr, huge_page_size(h)); | 184 | addr = ALIGN(start_addr, huge_page_size(h)); |
@@ -171,13 +192,18 @@ full_search: | |||
171 | */ | 192 | */ |
172 | if (start_addr != TASK_UNMAPPED_BASE) { | 193 | if (start_addr != TASK_UNMAPPED_BASE) { |
173 | start_addr = TASK_UNMAPPED_BASE; | 194 | start_addr = TASK_UNMAPPED_BASE; |
195 | mm->cached_hole_size = 0; | ||
174 | goto full_search; | 196 | goto full_search; |
175 | } | 197 | } |
176 | return -ENOMEM; | 198 | return -ENOMEM; |
177 | } | 199 | } |
178 | 200 | ||
179 | if (!vma || addr + len <= vma->vm_start) | 201 | if (!vma || addr + len <= vma->vm_start) { |
202 | mm->free_area_cache = addr + len; | ||
180 | return addr; | 203 | return addr; |
204 | } | ||
205 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
206 | mm->cached_hole_size = vma->vm_start - addr; | ||
181 | addr = ALIGN(vma->vm_end, huge_page_size(h)); | 207 | addr = ALIGN(vma->vm_end, huge_page_size(h)); |
182 | } | 208 | } |
183 | } | 209 | } |
@@ -238,17 +264,10 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, | |||
238 | loff_t isize; | 264 | loff_t isize; |
239 | ssize_t retval = 0; | 265 | ssize_t retval = 0; |
240 | 266 | ||
241 | mutex_lock(&inode->i_mutex); | ||
242 | |||
243 | /* validate length */ | 267 | /* validate length */ |
244 | if (len == 0) | 268 | if (len == 0) |
245 | goto out; | 269 | goto out; |
246 | 270 | ||
247 | isize = i_size_read(inode); | ||
248 | if (!isize) | ||
249 | goto out; | ||
250 | |||
251 | end_index = (isize - 1) >> huge_page_shift(h); | ||
252 | for (;;) { | 271 | for (;;) { |
253 | struct page *page; | 272 | struct page *page; |
254 | unsigned long nr, ret; | 273 | unsigned long nr, ret; |
@@ -256,18 +275,21 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, | |||
256 | 275 | ||
257 | /* nr is the maximum number of bytes to copy from this page */ | 276 | /* nr is the maximum number of bytes to copy from this page */ |
258 | nr = huge_page_size(h); | 277 | nr = huge_page_size(h); |
278 | isize = i_size_read(inode); | ||
279 | if (!isize) | ||
280 | goto out; | ||
281 | end_index = (isize - 1) >> huge_page_shift(h); | ||
259 | if (index >= end_index) { | 282 | if (index >= end_index) { |
260 | if (index > end_index) | 283 | if (index > end_index) |
261 | goto out; | 284 | goto out; |
262 | nr = ((isize - 1) & ~huge_page_mask(h)) + 1; | 285 | nr = ((isize - 1) & ~huge_page_mask(h)) + 1; |
263 | if (nr <= offset) { | 286 | if (nr <= offset) |
264 | goto out; | 287 | goto out; |
265 | } | ||
266 | } | 288 | } |
267 | nr = nr - offset; | 289 | nr = nr - offset; |
268 | 290 | ||
269 | /* Find the page */ | 291 | /* Find the page */ |
270 | page = find_get_page(mapping, index); | 292 | page = find_lock_page(mapping, index); |
271 | if (unlikely(page == NULL)) { | 293 | if (unlikely(page == NULL)) { |
272 | /* | 294 | /* |
273 | * We have a HOLE, zero out the user-buffer for the | 295 | * We have a HOLE, zero out the user-buffer for the |
@@ -279,17 +301,18 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, | |||
279 | else | 301 | else |
280 | ra = 0; | 302 | ra = 0; |
281 | } else { | 303 | } else { |
304 | unlock_page(page); | ||
305 | |||
282 | /* | 306 | /* |
283 | * We have the page, copy it to user space buffer. | 307 | * We have the page, copy it to user space buffer. |
284 | */ | 308 | */ |
285 | ra = hugetlbfs_read_actor(page, offset, buf, len, nr); | 309 | ra = hugetlbfs_read_actor(page, offset, buf, len, nr); |
286 | ret = ra; | 310 | ret = ra; |
311 | page_cache_release(page); | ||
287 | } | 312 | } |
288 | if (ra < 0) { | 313 | if (ra < 0) { |
289 | if (retval == 0) | 314 | if (retval == 0) |
290 | retval = ra; | 315 | retval = ra; |
291 | if (page) | ||
292 | page_cache_release(page); | ||
293 | goto out; | 316 | goto out; |
294 | } | 317 | } |
295 | 318 | ||
@@ -299,16 +322,12 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, | |||
299 | index += offset >> huge_page_shift(h); | 322 | index += offset >> huge_page_shift(h); |
300 | offset &= ~huge_page_mask(h); | 323 | offset &= ~huge_page_mask(h); |
301 | 324 | ||
302 | if (page) | ||
303 | page_cache_release(page); | ||
304 | |||
305 | /* short read or no more work */ | 325 | /* short read or no more work */ |
306 | if ((ret != nr) || (len == 0)) | 326 | if ((ret != nr) || (len == 0)) |
307 | break; | 327 | break; |
308 | } | 328 | } |
309 | out: | 329 | out: |
310 | *ppos = ((loff_t)index << huge_page_shift(h)) + offset; | 330 | *ppos = ((loff_t)index << huge_page_shift(h)) + offset; |
311 | mutex_unlock(&inode->i_mutex); | ||
312 | return retval; | 331 | return retval; |
313 | } | 332 | } |
314 | 333 | ||
@@ -607,9 +626,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
607 | spin_lock(&sbinfo->stat_lock); | 626 | spin_lock(&sbinfo->stat_lock); |
608 | /* If no limits set, just report 0 for max/free/used | 627 | /* If no limits set, just report 0 for max/free/used |
609 | * blocks, like simple_statfs() */ | 628 | * blocks, like simple_statfs() */ |
610 | if (sbinfo->max_blocks >= 0) { | 629 | if (sbinfo->spool) { |
611 | buf->f_blocks = sbinfo->max_blocks; | 630 | long free_pages; |
612 | buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; | 631 | |
632 | spin_lock(&sbinfo->spool->lock); | ||
633 | buf->f_blocks = sbinfo->spool->max_hpages; | ||
634 | free_pages = sbinfo->spool->max_hpages | ||
635 | - sbinfo->spool->used_hpages; | ||
636 | buf->f_bavail = buf->f_bfree = free_pages; | ||
637 | spin_unlock(&sbinfo->spool->lock); | ||
613 | buf->f_files = sbinfo->max_inodes; | 638 | buf->f_files = sbinfo->max_inodes; |
614 | buf->f_ffree = sbinfo->free_inodes; | 639 | buf->f_ffree = sbinfo->free_inodes; |
615 | } | 640 | } |
@@ -625,6 +650,10 @@ static void hugetlbfs_put_super(struct super_block *sb) | |||
625 | 650 | ||
626 | if (sbi) { | 651 | if (sbi) { |
627 | sb->s_fs_info = NULL; | 652 | sb->s_fs_info = NULL; |
653 | |||
654 | if (sbi->spool) | ||
655 | hugepage_put_subpool(sbi->spool); | ||
656 | |||
628 | kfree(sbi); | 657 | kfree(sbi); |
629 | } | 658 | } |
630 | } | 659 | } |
@@ -853,10 +882,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) | |||
853 | sb->s_fs_info = sbinfo; | 882 | sb->s_fs_info = sbinfo; |
854 | sbinfo->hstate = config.hstate; | 883 | sbinfo->hstate = config.hstate; |
855 | spin_lock_init(&sbinfo->stat_lock); | 884 | spin_lock_init(&sbinfo->stat_lock); |
856 | sbinfo->max_blocks = config.nr_blocks; | ||
857 | sbinfo->free_blocks = config.nr_blocks; | ||
858 | sbinfo->max_inodes = config.nr_inodes; | 885 | sbinfo->max_inodes = config.nr_inodes; |
859 | sbinfo->free_inodes = config.nr_inodes; | 886 | sbinfo->free_inodes = config.nr_inodes; |
887 | sbinfo->spool = NULL; | ||
888 | if (config.nr_blocks != -1) { | ||
889 | sbinfo->spool = hugepage_new_subpool(config.nr_blocks); | ||
890 | if (!sbinfo->spool) | ||
891 | goto out_free; | ||
892 | } | ||
860 | sb->s_maxbytes = MAX_LFS_FILESIZE; | 893 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
861 | sb->s_blocksize = huge_page_size(config.hstate); | 894 | sb->s_blocksize = huge_page_size(config.hstate); |
862 | sb->s_blocksize_bits = huge_page_shift(config.hstate); | 895 | sb->s_blocksize_bits = huge_page_shift(config.hstate); |
@@ -868,38 +901,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) | |||
868 | goto out_free; | 901 | goto out_free; |
869 | return 0; | 902 | return 0; |
870 | out_free: | 903 | out_free: |
904 | if (sbinfo->spool) | ||
905 | kfree(sbinfo->spool); | ||
871 | kfree(sbinfo); | 906 | kfree(sbinfo); |
872 | return -ENOMEM; | 907 | return -ENOMEM; |
873 | } | 908 | } |
874 | 909 | ||
875 | int hugetlb_get_quota(struct address_space *mapping, long delta) | ||
876 | { | ||
877 | int ret = 0; | ||
878 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); | ||
879 | |||
880 | if (sbinfo->free_blocks > -1) { | ||
881 | spin_lock(&sbinfo->stat_lock); | ||
882 | if (sbinfo->free_blocks - delta >= 0) | ||
883 | sbinfo->free_blocks -= delta; | ||
884 | else | ||
885 | ret = -ENOMEM; | ||
886 | spin_unlock(&sbinfo->stat_lock); | ||
887 | } | ||
888 | |||
889 | return ret; | ||
890 | } | ||
891 | |||
892 | void hugetlb_put_quota(struct address_space *mapping, long delta) | ||
893 | { | ||
894 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); | ||
895 | |||
896 | if (sbinfo->free_blocks > -1) { | ||
897 | spin_lock(&sbinfo->stat_lock); | ||
898 | sbinfo->free_blocks += delta; | ||
899 | spin_unlock(&sbinfo->stat_lock); | ||
900 | } | ||
901 | } | ||
902 | |||
903 | static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type, | 910 | static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type, |
904 | int flags, const char *dev_name, void *data) | 911 | int flags, const char *dev_name, void *data) |
905 | { | 912 | { |
@@ -919,8 +926,8 @@ static int can_do_hugetlb_shm(void) | |||
919 | return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); | 926 | return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); |
920 | } | 927 | } |
921 | 928 | ||
922 | struct file *hugetlb_file_setup(const char *name, size_t size, | 929 | struct file *hugetlb_file_setup(const char *name, unsigned long addr, |
923 | vm_flags_t acctflag, | 930 | size_t size, vm_flags_t acctflag, |
924 | struct user_struct **user, int creat_flags) | 931 | struct user_struct **user, int creat_flags) |
925 | { | 932 | { |
926 | int error = -ENOMEM; | 933 | int error = -ENOMEM; |
@@ -929,6 +936,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, | |||
929 | struct path path; | 936 | struct path path; |
930 | struct dentry *root; | 937 | struct dentry *root; |
931 | struct qstr quick_string; | 938 | struct qstr quick_string; |
939 | struct hstate *hstate; | ||
940 | unsigned long num_pages; | ||
932 | 941 | ||
933 | *user = NULL; | 942 | *user = NULL; |
934 | if (!hugetlbfs_vfsmount) | 943 | if (!hugetlbfs_vfsmount) |
@@ -937,7 +946,11 @@ struct file *hugetlb_file_setup(const char *name, size_t size, | |||
937 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { | 946 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { |
938 | *user = current_user(); | 947 | *user = current_user(); |
939 | if (user_shm_lock(size, *user)) { | 948 | if (user_shm_lock(size, *user)) { |
940 | printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n"); | 949 | task_lock(current); |
950 | printk_once(KERN_WARNING | ||
951 | "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n", | ||
952 | current->comm, current->pid); | ||
953 | task_unlock(current); | ||
941 | } else { | 954 | } else { |
942 | *user = NULL; | 955 | *user = NULL; |
943 | return ERR_PTR(-EPERM); | 956 | return ERR_PTR(-EPERM); |
@@ -958,10 +971,12 @@ struct file *hugetlb_file_setup(const char *name, size_t size, | |||
958 | if (!inode) | 971 | if (!inode) |
959 | goto out_dentry; | 972 | goto out_dentry; |
960 | 973 | ||
974 | hstate = hstate_inode(inode); | ||
975 | size += addr & ~huge_page_mask(hstate); | ||
976 | num_pages = ALIGN(size, huge_page_size(hstate)) >> | ||
977 | huge_page_shift(hstate); | ||
961 | error = -ENOMEM; | 978 | error = -ENOMEM; |
962 | if (hugetlb_reserve_pages(inode, 0, | 979 | if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag)) |
963 | size >> huge_page_shift(hstate_inode(inode)), NULL, | ||
964 | acctflag)) | ||
965 | goto out_inode; | 980 | goto out_inode; |
966 | 981 | ||
967 | d_instantiate(path.dentry, inode); | 982 | d_instantiate(path.dentry, inode); |
@@ -997,6 +1012,7 @@ static int __init init_hugetlbfs_fs(void) | |||
997 | if (error) | 1012 | if (error) |
998 | return error; | 1013 | return error; |
999 | 1014 | ||
1015 | error = -ENOMEM; | ||
1000 | hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", | 1016 | hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", |
1001 | sizeof(struct hugetlbfs_inode_info), | 1017 | sizeof(struct hugetlbfs_inode_info), |
1002 | 0, 0, init_once); | 1018 | 0, 0, init_once); |
@@ -1015,10 +1031,10 @@ static int __init init_hugetlbfs_fs(void) | |||
1015 | } | 1031 | } |
1016 | 1032 | ||
1017 | error = PTR_ERR(vfsmount); | 1033 | error = PTR_ERR(vfsmount); |
1034 | unregister_filesystem(&hugetlbfs_fs_type); | ||
1018 | 1035 | ||
1019 | out: | 1036 | out: |
1020 | if (error) | 1037 | kmem_cache_destroy(hugetlbfs_inode_cachep); |
1021 | kmem_cache_destroy(hugetlbfs_inode_cachep); | ||
1022 | out2: | 1038 | out2: |
1023 | bdi_destroy(&hugetlbfs_backing_dev_info); | 1039 | bdi_destroy(&hugetlbfs_backing_dev_info); |
1024 | return error; | 1040 | return error; |
diff --git a/fs/namei.c b/fs/namei.c index 13e6a1f191a9..a94a7f9a03ea 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1455,9 +1455,15 @@ done: | |||
1455 | } | 1455 | } |
1456 | EXPORT_SYMBOL(full_name_hash); | 1456 | EXPORT_SYMBOL(full_name_hash); |
1457 | 1457 | ||
1458 | #ifdef CONFIG_64BIT | ||
1458 | #define ONEBYTES 0x0101010101010101ul | 1459 | #define ONEBYTES 0x0101010101010101ul |
1459 | #define SLASHBYTES 0x2f2f2f2f2f2f2f2ful | 1460 | #define SLASHBYTES 0x2f2f2f2f2f2f2f2ful |
1460 | #define HIGHBITS 0x8080808080808080ul | 1461 | #define HIGHBITS 0x8080808080808080ul |
1462 | #else | ||
1463 | #define ONEBYTES 0x01010101ul | ||
1464 | #define SLASHBYTES 0x2f2f2f2ful | ||
1465 | #define HIGHBITS 0x80808080ul | ||
1466 | #endif | ||
1461 | 1467 | ||
1462 | /* Return the high bit set in the first byte that is a zero */ | 1468 | /* Return the high bit set in the first byte that is a zero */ |
1463 | static inline unsigned long has_zero(unsigned long a) | 1469 | static inline unsigned long has_zero(unsigned long a) |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 965d4bde3a3b..3b42c1418f31 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -2989,9 +2989,9 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
2989 | INF("cmdline", S_IRUGO, proc_pid_cmdline), | 2989 | INF("cmdline", S_IRUGO, proc_pid_cmdline), |
2990 | ONE("stat", S_IRUGO, proc_tgid_stat), | 2990 | ONE("stat", S_IRUGO, proc_tgid_stat), |
2991 | ONE("statm", S_IRUGO, proc_pid_statm), | 2991 | ONE("statm", S_IRUGO, proc_pid_statm), |
2992 | REG("maps", S_IRUGO, proc_maps_operations), | 2992 | REG("maps", S_IRUGO, proc_pid_maps_operations), |
2993 | #ifdef CONFIG_NUMA | 2993 | #ifdef CONFIG_NUMA |
2994 | REG("numa_maps", S_IRUGO, proc_numa_maps_operations), | 2994 | REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations), |
2995 | #endif | 2995 | #endif |
2996 | REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), | 2996 | REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), |
2997 | LNK("cwd", proc_cwd_link), | 2997 | LNK("cwd", proc_cwd_link), |
@@ -3002,7 +3002,7 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
3002 | REG("mountstats", S_IRUSR, proc_mountstats_operations), | 3002 | REG("mountstats", S_IRUSR, proc_mountstats_operations), |
3003 | #ifdef CONFIG_PROC_PAGE_MONITOR | 3003 | #ifdef CONFIG_PROC_PAGE_MONITOR |
3004 | REG("clear_refs", S_IWUSR, proc_clear_refs_operations), | 3004 | REG("clear_refs", S_IWUSR, proc_clear_refs_operations), |
3005 | REG("smaps", S_IRUGO, proc_smaps_operations), | 3005 | REG("smaps", S_IRUGO, proc_pid_smaps_operations), |
3006 | REG("pagemap", S_IRUGO, proc_pagemap_operations), | 3006 | REG("pagemap", S_IRUGO, proc_pagemap_operations), |
3007 | #endif | 3007 | #endif |
3008 | #ifdef CONFIG_SECURITY | 3008 | #ifdef CONFIG_SECURITY |
@@ -3348,9 +3348,9 @@ static const struct pid_entry tid_base_stuff[] = { | |||
3348 | INF("cmdline", S_IRUGO, proc_pid_cmdline), | 3348 | INF("cmdline", S_IRUGO, proc_pid_cmdline), |
3349 | ONE("stat", S_IRUGO, proc_tid_stat), | 3349 | ONE("stat", S_IRUGO, proc_tid_stat), |
3350 | ONE("statm", S_IRUGO, proc_pid_statm), | 3350 | ONE("statm", S_IRUGO, proc_pid_statm), |
3351 | REG("maps", S_IRUGO, proc_maps_operations), | 3351 | REG("maps", S_IRUGO, proc_tid_maps_operations), |
3352 | #ifdef CONFIG_NUMA | 3352 | #ifdef CONFIG_NUMA |
3353 | REG("numa_maps", S_IRUGO, proc_numa_maps_operations), | 3353 | REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations), |
3354 | #endif | 3354 | #endif |
3355 | REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), | 3355 | REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), |
3356 | LNK("cwd", proc_cwd_link), | 3356 | LNK("cwd", proc_cwd_link), |
@@ -3360,7 +3360,7 @@ static const struct pid_entry tid_base_stuff[] = { | |||
3360 | REG("mountinfo", S_IRUGO, proc_mountinfo_operations), | 3360 | REG("mountinfo", S_IRUGO, proc_mountinfo_operations), |
3361 | #ifdef CONFIG_PROC_PAGE_MONITOR | 3361 | #ifdef CONFIG_PROC_PAGE_MONITOR |
3362 | REG("clear_refs", S_IWUSR, proc_clear_refs_operations), | 3362 | REG("clear_refs", S_IWUSR, proc_clear_refs_operations), |
3363 | REG("smaps", S_IRUGO, proc_smaps_operations), | 3363 | REG("smaps", S_IRUGO, proc_tid_smaps_operations), |
3364 | REG("pagemap", S_IRUGO, proc_pagemap_operations), | 3364 | REG("pagemap", S_IRUGO, proc_pagemap_operations), |
3365 | #endif | 3365 | #endif |
3366 | #ifdef CONFIG_SECURITY | 3366 | #ifdef CONFIG_SECURITY |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 292577531ad1..c44efe19798f 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -53,9 +53,12 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, | |||
53 | struct pid *pid, struct task_struct *task); | 53 | struct pid *pid, struct task_struct *task); |
54 | extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); | 54 | extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); |
55 | 55 | ||
56 | extern const struct file_operations proc_maps_operations; | 56 | extern const struct file_operations proc_pid_maps_operations; |
57 | extern const struct file_operations proc_numa_maps_operations; | 57 | extern const struct file_operations proc_tid_maps_operations; |
58 | extern const struct file_operations proc_smaps_operations; | 58 | extern const struct file_operations proc_pid_numa_maps_operations; |
59 | extern const struct file_operations proc_tid_numa_maps_operations; | ||
60 | extern const struct file_operations proc_pid_smaps_operations; | ||
61 | extern const struct file_operations proc_tid_smaps_operations; | ||
59 | extern const struct file_operations proc_clear_refs_operations; | 62 | extern const struct file_operations proc_clear_refs_operations; |
60 | extern const struct file_operations proc_pagemap_operations; | 63 | extern const struct file_operations proc_pagemap_operations; |
61 | extern const struct file_operations proc_net_operations; | 64 | extern const struct file_operations proc_net_operations; |
diff --git a/fs/proc/page.c b/fs/proc/page.c index 6d8e6a9e93ab..7fcd0d60a968 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c | |||
@@ -115,6 +115,8 @@ u64 stable_page_flags(struct page *page) | |||
115 | u |= 1 << KPF_COMPOUND_TAIL; | 115 | u |= 1 << KPF_COMPOUND_TAIL; |
116 | if (PageHuge(page)) | 116 | if (PageHuge(page)) |
117 | u |= 1 << KPF_HUGE; | 117 | u |= 1 << KPF_HUGE; |
118 | else if (PageTransCompound(page)) | ||
119 | u |= 1 << KPF_THP; | ||
118 | 120 | ||
119 | /* | 121 | /* |
120 | * Caveats on high order pages: page->_count will only be set | 122 | * Caveats on high order pages: page->_count will only be set |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 7dcd2a250495..9694cc283511 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -209,16 +209,20 @@ static int do_maps_open(struct inode *inode, struct file *file, | |||
209 | return ret; | 209 | return ret; |
210 | } | 210 | } |
211 | 211 | ||
212 | static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) | 212 | static void |
213 | show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) | ||
213 | { | 214 | { |
214 | struct mm_struct *mm = vma->vm_mm; | 215 | struct mm_struct *mm = vma->vm_mm; |
215 | struct file *file = vma->vm_file; | 216 | struct file *file = vma->vm_file; |
217 | struct proc_maps_private *priv = m->private; | ||
218 | struct task_struct *task = priv->task; | ||
216 | vm_flags_t flags = vma->vm_flags; | 219 | vm_flags_t flags = vma->vm_flags; |
217 | unsigned long ino = 0; | 220 | unsigned long ino = 0; |
218 | unsigned long long pgoff = 0; | 221 | unsigned long long pgoff = 0; |
219 | unsigned long start, end; | 222 | unsigned long start, end; |
220 | dev_t dev = 0; | 223 | dev_t dev = 0; |
221 | int len; | 224 | int len; |
225 | const char *name = NULL; | ||
222 | 226 | ||
223 | if (file) { | 227 | if (file) { |
224 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 228 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
@@ -252,36 +256,57 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) | |||
252 | if (file) { | 256 | if (file) { |
253 | pad_len_spaces(m, len); | 257 | pad_len_spaces(m, len); |
254 | seq_path(m, &file->f_path, "\n"); | 258 | seq_path(m, &file->f_path, "\n"); |
255 | } else { | 259 | goto done; |
256 | const char *name = arch_vma_name(vma); | 260 | } |
257 | if (!name) { | 261 | |
258 | if (mm) { | 262 | name = arch_vma_name(vma); |
259 | if (vma->vm_start <= mm->brk && | 263 | if (!name) { |
260 | vma->vm_end >= mm->start_brk) { | 264 | pid_t tid; |
261 | name = "[heap]"; | 265 | |
262 | } else if (vma->vm_start <= mm->start_stack && | 266 | if (!mm) { |
263 | vma->vm_end >= mm->start_stack) { | 267 | name = "[vdso]"; |
264 | name = "[stack]"; | 268 | goto done; |
265 | } | 269 | } |
270 | |||
271 | if (vma->vm_start <= mm->brk && | ||
272 | vma->vm_end >= mm->start_brk) { | ||
273 | name = "[heap]"; | ||
274 | goto done; | ||
275 | } | ||
276 | |||
277 | tid = vm_is_stack(task, vma, is_pid); | ||
278 | |||
279 | if (tid != 0) { | ||
280 | /* | ||
281 | * Thread stack in /proc/PID/task/TID/maps or | ||
282 | * the main process stack. | ||
283 | */ | ||
284 | if (!is_pid || (vma->vm_start <= mm->start_stack && | ||
285 | vma->vm_end >= mm->start_stack)) { | ||
286 | name = "[stack]"; | ||
266 | } else { | 287 | } else { |
267 | name = "[vdso]"; | 288 | /* Thread stack in /proc/PID/maps */ |
289 | pad_len_spaces(m, len); | ||
290 | seq_printf(m, "[stack:%d]", tid); | ||
268 | } | 291 | } |
269 | } | 292 | } |
270 | if (name) { | 293 | } |
271 | pad_len_spaces(m, len); | 294 | |
272 | seq_puts(m, name); | 295 | done: |
273 | } | 296 | if (name) { |
297 | pad_len_spaces(m, len); | ||
298 | seq_puts(m, name); | ||
274 | } | 299 | } |
275 | seq_putc(m, '\n'); | 300 | seq_putc(m, '\n'); |
276 | } | 301 | } |
277 | 302 | ||
278 | static int show_map(struct seq_file *m, void *v) | 303 | static int show_map(struct seq_file *m, void *v, int is_pid) |
279 | { | 304 | { |
280 | struct vm_area_struct *vma = v; | 305 | struct vm_area_struct *vma = v; |
281 | struct proc_maps_private *priv = m->private; | 306 | struct proc_maps_private *priv = m->private; |
282 | struct task_struct *task = priv->task; | 307 | struct task_struct *task = priv->task; |
283 | 308 | ||
284 | show_map_vma(m, vma); | 309 | show_map_vma(m, vma, is_pid); |
285 | 310 | ||
286 | if (m->count < m->size) /* vma is copied successfully */ | 311 | if (m->count < m->size) /* vma is copied successfully */ |
287 | m->version = (vma != get_gate_vma(task->mm)) | 312 | m->version = (vma != get_gate_vma(task->mm)) |
@@ -289,20 +314,49 @@ static int show_map(struct seq_file *m, void *v) | |||
289 | return 0; | 314 | return 0; |
290 | } | 315 | } |
291 | 316 | ||
317 | static int show_pid_map(struct seq_file *m, void *v) | ||
318 | { | ||
319 | return show_map(m, v, 1); | ||
320 | } | ||
321 | |||
322 | static int show_tid_map(struct seq_file *m, void *v) | ||
323 | { | ||
324 | return show_map(m, v, 0); | ||
325 | } | ||
326 | |||
292 | static const struct seq_operations proc_pid_maps_op = { | 327 | static const struct seq_operations proc_pid_maps_op = { |
293 | .start = m_start, | 328 | .start = m_start, |
294 | .next = m_next, | 329 | .next = m_next, |
295 | .stop = m_stop, | 330 | .stop = m_stop, |
296 | .show = show_map | 331 | .show = show_pid_map |
297 | }; | 332 | }; |
298 | 333 | ||
299 | static int maps_open(struct inode *inode, struct file *file) | 334 | static const struct seq_operations proc_tid_maps_op = { |
335 | .start = m_start, | ||
336 | .next = m_next, | ||
337 | .stop = m_stop, | ||
338 | .show = show_tid_map | ||
339 | }; | ||
340 | |||
341 | static int pid_maps_open(struct inode *inode, struct file *file) | ||
300 | { | 342 | { |
301 | return do_maps_open(inode, file, &proc_pid_maps_op); | 343 | return do_maps_open(inode, file, &proc_pid_maps_op); |
302 | } | 344 | } |
303 | 345 | ||
304 | const struct file_operations proc_maps_operations = { | 346 | static int tid_maps_open(struct inode *inode, struct file *file) |
305 | .open = maps_open, | 347 | { |
348 | return do_maps_open(inode, file, &proc_tid_maps_op); | ||
349 | } | ||
350 | |||
351 | const struct file_operations proc_pid_maps_operations = { | ||
352 | .open = pid_maps_open, | ||
353 | .read = seq_read, | ||
354 | .llseek = seq_lseek, | ||
355 | .release = seq_release_private, | ||
356 | }; | ||
357 | |||
358 | const struct file_operations proc_tid_maps_operations = { | ||
359 | .open = tid_maps_open, | ||
306 | .read = seq_read, | 360 | .read = seq_read, |
307 | .llseek = seq_lseek, | 361 | .llseek = seq_lseek, |
308 | .release = seq_release_private, | 362 | .release = seq_release_private, |
@@ -394,21 +448,15 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
394 | pte_t *pte; | 448 | pte_t *pte; |
395 | spinlock_t *ptl; | 449 | spinlock_t *ptl; |
396 | 450 | ||
397 | spin_lock(&walk->mm->page_table_lock); | 451 | if (pmd_trans_huge_lock(pmd, vma) == 1) { |
398 | if (pmd_trans_huge(*pmd)) { | 452 | smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk); |
399 | if (pmd_trans_splitting(*pmd)) { | ||
400 | spin_unlock(&walk->mm->page_table_lock); | ||
401 | wait_split_huge_page(vma->anon_vma, pmd); | ||
402 | } else { | ||
403 | smaps_pte_entry(*(pte_t *)pmd, addr, | ||
404 | HPAGE_PMD_SIZE, walk); | ||
405 | spin_unlock(&walk->mm->page_table_lock); | ||
406 | mss->anonymous_thp += HPAGE_PMD_SIZE; | ||
407 | return 0; | ||
408 | } | ||
409 | } else { | ||
410 | spin_unlock(&walk->mm->page_table_lock); | 453 | spin_unlock(&walk->mm->page_table_lock); |
454 | mss->anonymous_thp += HPAGE_PMD_SIZE; | ||
455 | return 0; | ||
411 | } | 456 | } |
457 | |||
458 | if (pmd_trans_unstable(pmd)) | ||
459 | return 0; | ||
412 | /* | 460 | /* |
413 | * The mmap_sem held all the way back in m_start() is what | 461 | * The mmap_sem held all the way back in m_start() is what |
414 | * keeps khugepaged out of here and from collapsing things | 462 | * keeps khugepaged out of here and from collapsing things |
@@ -422,7 +470,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
422 | return 0; | 470 | return 0; |
423 | } | 471 | } |
424 | 472 | ||
425 | static int show_smap(struct seq_file *m, void *v) | 473 | static int show_smap(struct seq_file *m, void *v, int is_pid) |
426 | { | 474 | { |
427 | struct proc_maps_private *priv = m->private; | 475 | struct proc_maps_private *priv = m->private; |
428 | struct task_struct *task = priv->task; | 476 | struct task_struct *task = priv->task; |
@@ -440,7 +488,7 @@ static int show_smap(struct seq_file *m, void *v) | |||
440 | if (vma->vm_mm && !is_vm_hugetlb_page(vma)) | 488 | if (vma->vm_mm && !is_vm_hugetlb_page(vma)) |
441 | walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); | 489 | walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); |
442 | 490 | ||
443 | show_map_vma(m, vma); | 491 | show_map_vma(m, vma, is_pid); |
444 | 492 | ||
445 | seq_printf(m, | 493 | seq_printf(m, |
446 | "Size: %8lu kB\n" | 494 | "Size: %8lu kB\n" |
@@ -479,20 +527,49 @@ static int show_smap(struct seq_file *m, void *v) | |||
479 | return 0; | 527 | return 0; |
480 | } | 528 | } |
481 | 529 | ||
530 | static int show_pid_smap(struct seq_file *m, void *v) | ||
531 | { | ||
532 | return show_smap(m, v, 1); | ||
533 | } | ||
534 | |||
535 | static int show_tid_smap(struct seq_file *m, void *v) | ||
536 | { | ||
537 | return show_smap(m, v, 0); | ||
538 | } | ||
539 | |||
482 | static const struct seq_operations proc_pid_smaps_op = { | 540 | static const struct seq_operations proc_pid_smaps_op = { |
483 | .start = m_start, | 541 | .start = m_start, |
484 | .next = m_next, | 542 | .next = m_next, |
485 | .stop = m_stop, | 543 | .stop = m_stop, |
486 | .show = show_smap | 544 | .show = show_pid_smap |
545 | }; | ||
546 | |||
547 | static const struct seq_operations proc_tid_smaps_op = { | ||
548 | .start = m_start, | ||
549 | .next = m_next, | ||
550 | .stop = m_stop, | ||
551 | .show = show_tid_smap | ||
487 | }; | 552 | }; |
488 | 553 | ||
489 | static int smaps_open(struct inode *inode, struct file *file) | 554 | static int pid_smaps_open(struct inode *inode, struct file *file) |
490 | { | 555 | { |
491 | return do_maps_open(inode, file, &proc_pid_smaps_op); | 556 | return do_maps_open(inode, file, &proc_pid_smaps_op); |
492 | } | 557 | } |
493 | 558 | ||
494 | const struct file_operations proc_smaps_operations = { | 559 | static int tid_smaps_open(struct inode *inode, struct file *file) |
495 | .open = smaps_open, | 560 | { |
561 | return do_maps_open(inode, file, &proc_tid_smaps_op); | ||
562 | } | ||
563 | |||
564 | const struct file_operations proc_pid_smaps_operations = { | ||
565 | .open = pid_smaps_open, | ||
566 | .read = seq_read, | ||
567 | .llseek = seq_lseek, | ||
568 | .release = seq_release_private, | ||
569 | }; | ||
570 | |||
571 | const struct file_operations proc_tid_smaps_operations = { | ||
572 | .open = tid_smaps_open, | ||
496 | .read = seq_read, | 573 | .read = seq_read, |
497 | .llseek = seq_lseek, | 574 | .llseek = seq_lseek, |
498 | .release = seq_release_private, | 575 | .release = seq_release_private, |
@@ -507,6 +584,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | |||
507 | struct page *page; | 584 | struct page *page; |
508 | 585 | ||
509 | split_huge_page_pmd(walk->mm, pmd); | 586 | split_huge_page_pmd(walk->mm, pmd); |
587 | if (pmd_trans_unstable(pmd)) | ||
588 | return 0; | ||
510 | 589 | ||
511 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 590 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
512 | for (; addr != end; pte++, addr += PAGE_SIZE) { | 591 | for (; addr != end; pte++, addr += PAGE_SIZE) { |
@@ -598,11 +677,18 @@ const struct file_operations proc_clear_refs_operations = { | |||
598 | .llseek = noop_llseek, | 677 | .llseek = noop_llseek, |
599 | }; | 678 | }; |
600 | 679 | ||
680 | typedef struct { | ||
681 | u64 pme; | ||
682 | } pagemap_entry_t; | ||
683 | |||
601 | struct pagemapread { | 684 | struct pagemapread { |
602 | int pos, len; | 685 | int pos, len; |
603 | u64 *buffer; | 686 | pagemap_entry_t *buffer; |
604 | }; | 687 | }; |
605 | 688 | ||
689 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | ||
690 | #define PAGEMAP_WALK_MASK (PMD_MASK) | ||
691 | |||
606 | #define PM_ENTRY_BYTES sizeof(u64) | 692 | #define PM_ENTRY_BYTES sizeof(u64) |
607 | #define PM_STATUS_BITS 3 | 693 | #define PM_STATUS_BITS 3 |
608 | #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) | 694 | #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) |
@@ -620,10 +706,15 @@ struct pagemapread { | |||
620 | #define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) | 706 | #define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) |
621 | #define PM_END_OF_BUFFER 1 | 707 | #define PM_END_OF_BUFFER 1 |
622 | 708 | ||
623 | static int add_to_pagemap(unsigned long addr, u64 pfn, | 709 | static inline pagemap_entry_t make_pme(u64 val) |
710 | { | ||
711 | return (pagemap_entry_t) { .pme = val }; | ||
712 | } | ||
713 | |||
714 | static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, | ||
624 | struct pagemapread *pm) | 715 | struct pagemapread *pm) |
625 | { | 716 | { |
626 | pm->buffer[pm->pos++] = pfn; | 717 | pm->buffer[pm->pos++] = *pme; |
627 | if (pm->pos >= pm->len) | 718 | if (pm->pos >= pm->len) |
628 | return PM_END_OF_BUFFER; | 719 | return PM_END_OF_BUFFER; |
629 | return 0; | 720 | return 0; |
@@ -635,8 +726,10 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end, | |||
635 | struct pagemapread *pm = walk->private; | 726 | struct pagemapread *pm = walk->private; |
636 | unsigned long addr; | 727 | unsigned long addr; |
637 | int err = 0; | 728 | int err = 0; |
729 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); | ||
730 | |||
638 | for (addr = start; addr < end; addr += PAGE_SIZE) { | 731 | for (addr = start; addr < end; addr += PAGE_SIZE) { |
639 | err = add_to_pagemap(addr, PM_NOT_PRESENT, pm); | 732 | err = add_to_pagemap(addr, &pme, pm); |
640 | if (err) | 733 | if (err) |
641 | break; | 734 | break; |
642 | } | 735 | } |
@@ -649,17 +742,35 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte) | |||
649 | return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT); | 742 | return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT); |
650 | } | 743 | } |
651 | 744 | ||
652 | static u64 pte_to_pagemap_entry(pte_t pte) | 745 | static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte) |
653 | { | 746 | { |
654 | u64 pme = 0; | ||
655 | if (is_swap_pte(pte)) | 747 | if (is_swap_pte(pte)) |
656 | pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte)) | 748 | *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte)) |
657 | | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP; | 749 | | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP); |
658 | else if (pte_present(pte)) | 750 | else if (pte_present(pte)) |
659 | pme = PM_PFRAME(pte_pfn(pte)) | 751 | *pme = make_pme(PM_PFRAME(pte_pfn(pte)) |
660 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; | 752 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); |
661 | return pme; | 753 | } |
754 | |||
755 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
756 | static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, | ||
757 | pmd_t pmd, int offset) | ||
758 | { | ||
759 | /* | ||
760 | * Currently pmd for thp is always present because thp can not be | ||
761 | * swapped-out, migrated, or HWPOISONed (split in such cases instead.) | ||
762 | * This if-check is just to prepare for future implementation. | ||
763 | */ | ||
764 | if (pmd_present(pmd)) | ||
765 | *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) | ||
766 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); | ||
662 | } | 767 | } |
768 | #else | ||
769 | static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, | ||
770 | pmd_t pmd, int offset) | ||
771 | { | ||
772 | } | ||
773 | #endif | ||
663 | 774 | ||
664 | static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | 775 | static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
665 | struct mm_walk *walk) | 776 | struct mm_walk *walk) |
@@ -668,13 +779,30 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
668 | struct pagemapread *pm = walk->private; | 779 | struct pagemapread *pm = walk->private; |
669 | pte_t *pte; | 780 | pte_t *pte; |
670 | int err = 0; | 781 | int err = 0; |
782 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); | ||
671 | 783 | ||
672 | split_huge_page_pmd(walk->mm, pmd); | 784 | if (pmd_trans_unstable(pmd)) |
785 | return 0; | ||
673 | 786 | ||
674 | /* find the first VMA at or above 'addr' */ | 787 | /* find the first VMA at or above 'addr' */ |
675 | vma = find_vma(walk->mm, addr); | 788 | vma = find_vma(walk->mm, addr); |
789 | spin_lock(&walk->mm->page_table_lock); | ||
790 | if (pmd_trans_huge_lock(pmd, vma) == 1) { | ||
791 | for (; addr != end; addr += PAGE_SIZE) { | ||
792 | unsigned long offset; | ||
793 | |||
794 | offset = (addr & ~PAGEMAP_WALK_MASK) >> | ||
795 | PAGE_SHIFT; | ||
796 | thp_pmd_to_pagemap_entry(&pme, *pmd, offset); | ||
797 | err = add_to_pagemap(addr, &pme, pm); | ||
798 | if (err) | ||
799 | break; | ||
800 | } | ||
801 | spin_unlock(&walk->mm->page_table_lock); | ||
802 | return err; | ||
803 | } | ||
804 | |||
676 | for (; addr != end; addr += PAGE_SIZE) { | 805 | for (; addr != end; addr += PAGE_SIZE) { |
677 | u64 pfn = PM_NOT_PRESENT; | ||
678 | 806 | ||
679 | /* check to see if we've left 'vma' behind | 807 | /* check to see if we've left 'vma' behind |
680 | * and need a new, higher one */ | 808 | * and need a new, higher one */ |
@@ -686,11 +814,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
686 | if (vma && (vma->vm_start <= addr) && | 814 | if (vma && (vma->vm_start <= addr) && |
687 | !is_vm_hugetlb_page(vma)) { | 815 | !is_vm_hugetlb_page(vma)) { |
688 | pte = pte_offset_map(pmd, addr); | 816 | pte = pte_offset_map(pmd, addr); |
689 | pfn = pte_to_pagemap_entry(*pte); | 817 | pte_to_pagemap_entry(&pme, *pte); |
690 | /* unmap before userspace copy */ | 818 | /* unmap before userspace copy */ |
691 | pte_unmap(pte); | 819 | pte_unmap(pte); |
692 | } | 820 | } |
693 | err = add_to_pagemap(addr, pfn, pm); | 821 | err = add_to_pagemap(addr, &pme, pm); |
694 | if (err) | 822 | if (err) |
695 | return err; | 823 | return err; |
696 | } | 824 | } |
@@ -701,13 +829,12 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | |||
701 | } | 829 | } |
702 | 830 | ||
703 | #ifdef CONFIG_HUGETLB_PAGE | 831 | #ifdef CONFIG_HUGETLB_PAGE |
704 | static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset) | 832 | static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, |
833 | pte_t pte, int offset) | ||
705 | { | 834 | { |
706 | u64 pme = 0; | ||
707 | if (pte_present(pte)) | 835 | if (pte_present(pte)) |
708 | pme = PM_PFRAME(pte_pfn(pte) + offset) | 836 | *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) |
709 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; | 837 | | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); |
710 | return pme; | ||
711 | } | 838 | } |
712 | 839 | ||
713 | /* This function walks within one hugetlb entry in the single call */ | 840 | /* This function walks within one hugetlb entry in the single call */ |
@@ -717,12 +844,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | |||
717 | { | 844 | { |
718 | struct pagemapread *pm = walk->private; | 845 | struct pagemapread *pm = walk->private; |
719 | int err = 0; | 846 | int err = 0; |
720 | u64 pfn; | 847 | pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); |
721 | 848 | ||
722 | for (; addr != end; addr += PAGE_SIZE) { | 849 | for (; addr != end; addr += PAGE_SIZE) { |
723 | int offset = (addr & ~hmask) >> PAGE_SHIFT; | 850 | int offset = (addr & ~hmask) >> PAGE_SHIFT; |
724 | pfn = huge_pte_to_pagemap_entry(*pte, offset); | 851 | huge_pte_to_pagemap_entry(&pme, *pte, offset); |
725 | err = add_to_pagemap(addr, pfn, pm); | 852 | err = add_to_pagemap(addr, &pme, pm); |
726 | if (err) | 853 | if (err) |
727 | return err; | 854 | return err; |
728 | } | 855 | } |
@@ -757,8 +884,6 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, | |||
757 | * determine which areas of memory are actually mapped and llseek to | 884 | * determine which areas of memory are actually mapped and llseek to |
758 | * skip over unmapped regions. | 885 | * skip over unmapped regions. |
759 | */ | 886 | */ |
760 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | ||
761 | #define PAGEMAP_WALK_MASK (PMD_MASK) | ||
762 | static ssize_t pagemap_read(struct file *file, char __user *buf, | 887 | static ssize_t pagemap_read(struct file *file, char __user *buf, |
763 | size_t count, loff_t *ppos) | 888 | size_t count, loff_t *ppos) |
764 | { | 889 | { |
@@ -941,26 +1066,21 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | |||
941 | pte_t *pte; | 1066 | pte_t *pte; |
942 | 1067 | ||
943 | md = walk->private; | 1068 | md = walk->private; |
944 | spin_lock(&walk->mm->page_table_lock); | 1069 | |
945 | if (pmd_trans_huge(*pmd)) { | 1070 | if (pmd_trans_huge_lock(pmd, md->vma) == 1) { |
946 | if (pmd_trans_splitting(*pmd)) { | 1071 | pte_t huge_pte = *(pte_t *)pmd; |
947 | spin_unlock(&walk->mm->page_table_lock); | 1072 | struct page *page; |
948 | wait_split_huge_page(md->vma->anon_vma, pmd); | 1073 | |
949 | } else { | 1074 | page = can_gather_numa_stats(huge_pte, md->vma, addr); |
950 | pte_t huge_pte = *(pte_t *)pmd; | 1075 | if (page) |
951 | struct page *page; | 1076 | gather_stats(page, md, pte_dirty(huge_pte), |
952 | 1077 | HPAGE_PMD_SIZE/PAGE_SIZE); | |
953 | page = can_gather_numa_stats(huge_pte, md->vma, addr); | ||
954 | if (page) | ||
955 | gather_stats(page, md, pte_dirty(huge_pte), | ||
956 | HPAGE_PMD_SIZE/PAGE_SIZE); | ||
957 | spin_unlock(&walk->mm->page_table_lock); | ||
958 | return 0; | ||
959 | } | ||
960 | } else { | ||
961 | spin_unlock(&walk->mm->page_table_lock); | 1078 | spin_unlock(&walk->mm->page_table_lock); |
1079 | return 0; | ||
962 | } | 1080 | } |
963 | 1081 | ||
1082 | if (pmd_trans_unstable(pmd)) | ||
1083 | return 0; | ||
964 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); | 1084 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); |
965 | do { | 1085 | do { |
966 | struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | 1086 | struct page *page = can_gather_numa_stats(*pte, md->vma, addr); |
@@ -1002,7 +1122,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, | |||
1002 | /* | 1122 | /* |
1003 | * Display pages allocated per node and memory policy via /proc. | 1123 | * Display pages allocated per node and memory policy via /proc. |
1004 | */ | 1124 | */ |
1005 | static int show_numa_map(struct seq_file *m, void *v) | 1125 | static int show_numa_map(struct seq_file *m, void *v, int is_pid) |
1006 | { | 1126 | { |
1007 | struct numa_maps_private *numa_priv = m->private; | 1127 | struct numa_maps_private *numa_priv = m->private; |
1008 | struct proc_maps_private *proc_priv = &numa_priv->proc_maps; | 1128 | struct proc_maps_private *proc_priv = &numa_priv->proc_maps; |
@@ -1039,9 +1159,19 @@ static int show_numa_map(struct seq_file *m, void *v) | |||
1039 | seq_path(m, &file->f_path, "\n\t= "); | 1159 | seq_path(m, &file->f_path, "\n\t= "); |
1040 | } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { | 1160 | } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { |
1041 | seq_printf(m, " heap"); | 1161 | seq_printf(m, " heap"); |
1042 | } else if (vma->vm_start <= mm->start_stack && | 1162 | } else { |
1043 | vma->vm_end >= mm->start_stack) { | 1163 | pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid); |
1044 | seq_printf(m, " stack"); | 1164 | if (tid != 0) { |
1165 | /* | ||
1166 | * Thread stack in /proc/PID/task/TID/maps or | ||
1167 | * the main process stack. | ||
1168 | */ | ||
1169 | if (!is_pid || (vma->vm_start <= mm->start_stack && | ||
1170 | vma->vm_end >= mm->start_stack)) | ||
1171 | seq_printf(m, " stack"); | ||
1172 | else | ||
1173 | seq_printf(m, " stack:%d", tid); | ||
1174 | } | ||
1045 | } | 1175 | } |
1046 | 1176 | ||
1047 | if (is_vm_hugetlb_page(vma)) | 1177 | if (is_vm_hugetlb_page(vma)) |
@@ -1084,21 +1214,39 @@ out: | |||
1084 | return 0; | 1214 | return 0; |
1085 | } | 1215 | } |
1086 | 1216 | ||
1217 | static int show_pid_numa_map(struct seq_file *m, void *v) | ||
1218 | { | ||
1219 | return show_numa_map(m, v, 1); | ||
1220 | } | ||
1221 | |||
1222 | static int show_tid_numa_map(struct seq_file *m, void *v) | ||
1223 | { | ||
1224 | return show_numa_map(m, v, 0); | ||
1225 | } | ||
1226 | |||
1087 | static const struct seq_operations proc_pid_numa_maps_op = { | 1227 | static const struct seq_operations proc_pid_numa_maps_op = { |
1088 | .start = m_start, | 1228 | .start = m_start, |
1089 | .next = m_next, | 1229 | .next = m_next, |
1090 | .stop = m_stop, | 1230 | .stop = m_stop, |
1091 | .show = show_numa_map, | 1231 | .show = show_pid_numa_map, |
1232 | }; | ||
1233 | |||
1234 | static const struct seq_operations proc_tid_numa_maps_op = { | ||
1235 | .start = m_start, | ||
1236 | .next = m_next, | ||
1237 | .stop = m_stop, | ||
1238 | .show = show_tid_numa_map, | ||
1092 | }; | 1239 | }; |
1093 | 1240 | ||
1094 | static int numa_maps_open(struct inode *inode, struct file *file) | 1241 | static int numa_maps_open(struct inode *inode, struct file *file, |
1242 | const struct seq_operations *ops) | ||
1095 | { | 1243 | { |
1096 | struct numa_maps_private *priv; | 1244 | struct numa_maps_private *priv; |
1097 | int ret = -ENOMEM; | 1245 | int ret = -ENOMEM; |
1098 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 1246 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
1099 | if (priv) { | 1247 | if (priv) { |
1100 | priv->proc_maps.pid = proc_pid(inode); | 1248 | priv->proc_maps.pid = proc_pid(inode); |
1101 | ret = seq_open(file, &proc_pid_numa_maps_op); | 1249 | ret = seq_open(file, ops); |
1102 | if (!ret) { | 1250 | if (!ret) { |
1103 | struct seq_file *m = file->private_data; | 1251 | struct seq_file *m = file->private_data; |
1104 | m->private = priv; | 1252 | m->private = priv; |
@@ -1109,8 +1257,25 @@ static int numa_maps_open(struct inode *inode, struct file *file) | |||
1109 | return ret; | 1257 | return ret; |
1110 | } | 1258 | } |
1111 | 1259 | ||
1112 | const struct file_operations proc_numa_maps_operations = { | 1260 | static int pid_numa_maps_open(struct inode *inode, struct file *file) |
1113 | .open = numa_maps_open, | 1261 | { |
1262 | return numa_maps_open(inode, file, &proc_pid_numa_maps_op); | ||
1263 | } | ||
1264 | |||
1265 | static int tid_numa_maps_open(struct inode *inode, struct file *file) | ||
1266 | { | ||
1267 | return numa_maps_open(inode, file, &proc_tid_numa_maps_op); | ||
1268 | } | ||
1269 | |||
1270 | const struct file_operations proc_pid_numa_maps_operations = { | ||
1271 | .open = pid_numa_maps_open, | ||
1272 | .read = seq_read, | ||
1273 | .llseek = seq_lseek, | ||
1274 | .release = seq_release_private, | ||
1275 | }; | ||
1276 | |||
1277 | const struct file_operations proc_tid_numa_maps_operations = { | ||
1278 | .open = tid_numa_maps_open, | ||
1114 | .read = seq_read, | 1279 | .read = seq_read, |
1115 | .llseek = seq_lseek, | 1280 | .llseek = seq_lseek, |
1116 | .release = seq_release_private, | 1281 | .release = seq_release_private, |
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 980de547c070..74fe164d1b23 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -134,9 +134,11 @@ static void pad_len_spaces(struct seq_file *m, int len) | |||
134 | /* | 134 | /* |
135 | * display a single VMA to a sequenced file | 135 | * display a single VMA to a sequenced file |
136 | */ | 136 | */ |
137 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | 137 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, |
138 | int is_pid) | ||
138 | { | 139 | { |
139 | struct mm_struct *mm = vma->vm_mm; | 140 | struct mm_struct *mm = vma->vm_mm; |
141 | struct proc_maps_private *priv = m->private; | ||
140 | unsigned long ino = 0; | 142 | unsigned long ino = 0; |
141 | struct file *file; | 143 | struct file *file; |
142 | dev_t dev = 0; | 144 | dev_t dev = 0; |
@@ -168,10 +170,19 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | |||
168 | pad_len_spaces(m, len); | 170 | pad_len_spaces(m, len); |
169 | seq_path(m, &file->f_path, ""); | 171 | seq_path(m, &file->f_path, ""); |
170 | } else if (mm) { | 172 | } else if (mm) { |
171 | if (vma->vm_start <= mm->start_stack && | 173 | pid_t tid = vm_is_stack(priv->task, vma, is_pid); |
172 | vma->vm_end >= mm->start_stack) { | 174 | |
175 | if (tid != 0) { | ||
173 | pad_len_spaces(m, len); | 176 | pad_len_spaces(m, len); |
174 | seq_puts(m, "[stack]"); | 177 | /* |
178 | * Thread stack in /proc/PID/task/TID/maps or | ||
179 | * the main process stack. | ||
180 | */ | ||
181 | if (!is_pid || (vma->vm_start <= mm->start_stack && | ||
182 | vma->vm_end >= mm->start_stack)) | ||
183 | seq_printf(m, "[stack]"); | ||
184 | else | ||
185 | seq_printf(m, "[stack:%d]", tid); | ||
175 | } | 186 | } |
176 | } | 187 | } |
177 | 188 | ||
@@ -182,11 +193,22 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | |||
182 | /* | 193 | /* |
183 | * display mapping lines for a particular process's /proc/pid/maps | 194 | * display mapping lines for a particular process's /proc/pid/maps |
184 | */ | 195 | */ |
185 | static int show_map(struct seq_file *m, void *_p) | 196 | static int show_map(struct seq_file *m, void *_p, int is_pid) |
186 | { | 197 | { |
187 | struct rb_node *p = _p; | 198 | struct rb_node *p = _p; |
188 | 199 | ||
189 | return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb)); | 200 | return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb), |
201 | is_pid); | ||
202 | } | ||
203 | |||
204 | static int show_pid_map(struct seq_file *m, void *_p) | ||
205 | { | ||
206 | return show_map(m, _p, 1); | ||
207 | } | ||
208 | |||
209 | static int show_tid_map(struct seq_file *m, void *_p) | ||
210 | { | ||
211 | return show_map(m, _p, 0); | ||
190 | } | 212 | } |
191 | 213 | ||
192 | static void *m_start(struct seq_file *m, loff_t *pos) | 214 | static void *m_start(struct seq_file *m, loff_t *pos) |
@@ -240,10 +262,18 @@ static const struct seq_operations proc_pid_maps_ops = { | |||
240 | .start = m_start, | 262 | .start = m_start, |
241 | .next = m_next, | 263 | .next = m_next, |
242 | .stop = m_stop, | 264 | .stop = m_stop, |
243 | .show = show_map | 265 | .show = show_pid_map |
266 | }; | ||
267 | |||
268 | static const struct seq_operations proc_tid_maps_ops = { | ||
269 | .start = m_start, | ||
270 | .next = m_next, | ||
271 | .stop = m_stop, | ||
272 | .show = show_tid_map | ||
244 | }; | 273 | }; |
245 | 274 | ||
246 | static int maps_open(struct inode *inode, struct file *file) | 275 | static int maps_open(struct inode *inode, struct file *file, |
276 | const struct seq_operations *ops) | ||
247 | { | 277 | { |
248 | struct proc_maps_private *priv; | 278 | struct proc_maps_private *priv; |
249 | int ret = -ENOMEM; | 279 | int ret = -ENOMEM; |
@@ -251,7 +281,7 @@ static int maps_open(struct inode *inode, struct file *file) | |||
251 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 281 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
252 | if (priv) { | 282 | if (priv) { |
253 | priv->pid = proc_pid(inode); | 283 | priv->pid = proc_pid(inode); |
254 | ret = seq_open(file, &proc_pid_maps_ops); | 284 | ret = seq_open(file, ops); |
255 | if (!ret) { | 285 | if (!ret) { |
256 | struct seq_file *m = file->private_data; | 286 | struct seq_file *m = file->private_data; |
257 | m->private = priv; | 287 | m->private = priv; |
@@ -262,8 +292,25 @@ static int maps_open(struct inode *inode, struct file *file) | |||
262 | return ret; | 292 | return ret; |
263 | } | 293 | } |
264 | 294 | ||
265 | const struct file_operations proc_maps_operations = { | 295 | static int pid_maps_open(struct inode *inode, struct file *file) |
266 | .open = maps_open, | 296 | { |
297 | return maps_open(inode, file, &proc_pid_maps_ops); | ||
298 | } | ||
299 | |||
300 | static int tid_maps_open(struct inode *inode, struct file *file) | ||
301 | { | ||
302 | return maps_open(inode, file, &proc_tid_maps_ops); | ||
303 | } | ||
304 | |||
305 | const struct file_operations proc_pid_maps_operations = { | ||
306 | .open = pid_maps_open, | ||
307 | .read = seq_read, | ||
308 | .llseek = seq_lseek, | ||
309 | .release = seq_release_private, | ||
310 | }; | ||
311 | |||
312 | const struct file_operations proc_tid_maps_operations = { | ||
313 | .open = tid_maps_open, | ||
267 | .read = seq_read, | 314 | .read = seq_read, |
268 | .llseek = seq_lseek, | 315 | .llseek = seq_lseek, |
269 | .release = seq_release_private, | 316 | .release = seq_release_private, |
diff --git a/fs/seq_file.c b/fs/seq_file.c index 4023d6be939b..aa242dc99373 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -140,9 +140,21 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) | |||
140 | 140 | ||
141 | mutex_lock(&m->lock); | 141 | mutex_lock(&m->lock); |
142 | 142 | ||
143 | /* | ||
144 | * seq_file->op->..m_start/m_stop/m_next may do special actions | ||
145 | * or optimisations based on the file->f_version, so we want to | ||
146 | * pass the file->f_version to those methods. | ||
147 | * | ||
148 | * seq_file->version is just copy of f_version, and seq_file | ||
149 | * methods can treat it simply as file version. | ||
150 | * It is copied in first and copied out after all operations. | ||
151 | * It is convenient to have it as part of structure to avoid the | ||
152 | * need of passing another argument to all the seq_file methods. | ||
153 | */ | ||
154 | m->version = file->f_version; | ||
155 | |||
143 | /* Don't assume *ppos is where we left it */ | 156 | /* Don't assume *ppos is where we left it */ |
144 | if (unlikely(*ppos != m->read_pos)) { | 157 | if (unlikely(*ppos != m->read_pos)) { |
145 | m->read_pos = *ppos; | ||
146 | while ((err = traverse(m, *ppos)) == -EAGAIN) | 158 | while ((err = traverse(m, *ppos)) == -EAGAIN) |
147 | ; | 159 | ; |
148 | if (err) { | 160 | if (err) { |
@@ -152,21 +164,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) | |||
152 | m->index = 0; | 164 | m->index = 0; |
153 | m->count = 0; | 165 | m->count = 0; |
154 | goto Done; | 166 | goto Done; |
167 | } else { | ||
168 | m->read_pos = *ppos; | ||
155 | } | 169 | } |
156 | } | 170 | } |
157 | 171 | ||
158 | /* | ||
159 | * seq_file->op->..m_start/m_stop/m_next may do special actions | ||
160 | * or optimisations based on the file->f_version, so we want to | ||
161 | * pass the file->f_version to those methods. | ||
162 | * | ||
163 | * seq_file->version is just copy of f_version, and seq_file | ||
164 | * methods can treat it simply as file version. | ||
165 | * It is copied in first and copied out after all operations. | ||
166 | * It is convenient to have it as part of structure to avoid the | ||
167 | * need of passing another argument to all the seq_file methods. | ||
168 | */ | ||
169 | m->version = file->f_version; | ||
170 | /* grab buffer if we didn't have one */ | 172 | /* grab buffer if we didn't have one */ |
171 | if (!m->buf) { | 173 | if (!m->buf) { |
172 | m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); | 174 | m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); |