author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-09 03:23:15 -0400
commit     9e2d8656f5e8aa214e66b462680cf86b210b74a8 (patch)
tree       f67d62e896cedf75599ea45f9ecf9999c6ad24cd /fs
parent     1ea4f4f8405cc1ceec23f2d261bc3775785e6712 (diff)
parent     9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
"A few misc things and very nearly all of the MM tree. A tremendous
amount of stuff (again), including a significant rbtree library
rework."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
sparc64: Support transparent huge pages.
mm: thp: Use more portable PMD clearing sequenece in zap_huge_pmd().
mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
sparc64: Document PGD and PMD layout.
sparc64: Eliminate PTE table memory wastage.
sparc64: Halve the size of PTE tables
sparc64: Only support 4MB huge pages and 8KB base pages.
memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
mm: memcg: clean up mm_match_cgroup() signature
mm: document PageHuge somewhat
mm: use %pK for /proc/vmallocinfo
mm, thp: fix mlock statistics
mm, thp: fix mapped pages avoiding unevictable list on mlock
memory-hotplug: update memory block's state and notify userspace
memory-hotplug: preparation to notify memory block's state at memory hot remove
mm: avoid section mismatch warning for memblock_type_name
make GFP_NOTRACK definition unconditional
cma: decrease cc.nr_migratepages after reclaiming pagelist
CMA: migrate mlocked pages
kpageflags: fix wrong KPF_THP on non-huge compound pages
...
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_file.c       |   1
-rw-r--r--  fs/binfmt_elf.c        |   4
-rw-r--r--  fs/binfmt_elf_fdpic.c  |   2
-rw-r--r--  fs/btrfs/file.c        |   2
-rw-r--r--  fs/ceph/addr.c         |   2
-rw-r--r--  fs/cifs/file.c         |   1
-rw-r--r--  fs/exec.c              |   2
-rw-r--r--  fs/ext4/file.c         |   2
-rw-r--r--  fs/fs-writeback.c      |   7
-rw-r--r--  fs/fuse/file.c         |   1
-rw-r--r--  fs/gfs2/file.c         |   2
-rw-r--r--  fs/hugetlbfs/inode.c   |  11
-rw-r--r--  fs/inode.c             |   2
-rw-r--r--  fs/jffs2/readinode.c   |  13
-rw-r--r--  fs/nfs/file.c          |   1
-rw-r--r--  fs/nilfs2/file.c       |   2
-rw-r--r--  fs/ocfs2/mmap.c        |   2
-rw-r--r--  fs/proc/base.c         | 117
-rw-r--r--  fs/proc/page.c         |   8
-rw-r--r--  fs/proc/proc_sysctl.c  |   5
-rw-r--r--  fs/proc/task_mmu.c     |   2
-rw-r--r--  fs/ubifs/file.c        |   1
-rw-r--r--  fs/xfs/xfs_file.c      |   2
23 files changed, 44 insertions, 148 deletions
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index dd6f7ee1e312..c2483e97beee 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -738,6 +738,7 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
 static const struct vm_operations_struct v9fs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = v9fs_vm_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };


diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 28a64e769527..e800dec958c3 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1123,7 +1123,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
         if (always_dump_vma(vma))
                 goto whole;

-        if (vma->vm_flags & VM_NODUMP)
+        if (vma->vm_flags & VM_DONTDUMP)
                 return 0;

         /* Hugetlb memory check */
@@ -1135,7 +1135,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
         }

         /* Do not dump I/O mapped devices or special mappings */
-        if (vma->vm_flags & (VM_IO | VM_RESERVED))
+        if (vma->vm_flags & VM_IO)
                 return 0;

         /* By default, dump shared memory if mapped from an anonymous file. */
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 08d812b32282..262db114ff01 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1205,7 +1205,7 @@ static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
         int dump_ok;

         /* Do not dump I/O mapped devices or special mappings */
-        if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
+        if (vma->vm_flags & VM_IO) {
                 kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
                 return 0;
         }
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5caf285c6e4d..f6b40e86121b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1599,6 +1599,7 @@ out:
 static const struct vm_operations_struct btrfs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = btrfs_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1610,7 +1611,6 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)

         file_accessed(filp);
         vma->vm_ops = &btrfs_file_vm_ops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;

         return 0;
 }
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 22b6e4583faa..6690269f5dde 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1224,6 +1224,7 @@ out:
 static struct vm_operations_struct ceph_vmops = {
         .fault = filemap_fault,
         .page_mkwrite = ceph_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 int ceph_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1234,6 +1235,5 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
                 return -ENOEXEC;
         file_accessed(file);
         vma->vm_ops = &ceph_vmops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;
         return 0;
 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7d7bbdc4c8e7..edb25b4bbb95 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3003,6 +3003,7 @@ cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static struct vm_operations_struct cifs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = cifs_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -603,7 +603,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
          * process cleanup to remove whatever mess we made.
          */
         if (length != move_page_tables(vma, old_start,
-                                       vma, new_start, length))
+                                       vma, new_start, length, false))
                 return -ENOMEM;

         lru_add_drain();
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index ca6f07afe601..bf3966bccd34 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -207,6 +207,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 static const struct vm_operations_struct ext4_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = ext4_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
@@ -217,7 +218,6 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
                 return -ENOEXEC;
         file_accessed(file);
         vma->vm_ops = &ext4_file_vm_ops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;
         return 0;
 }

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8e1d7b9e4a33..401b6c6248ae 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -439,8 +439,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
  * setting I_SYNC flag and calling inode_sync_complete() to clear it.
  */
 static int
-__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
-                         struct writeback_control *wbc)
+__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 {
         struct address_space *mapping = inode->i_mapping;
         long nr_to_write = wbc->nr_to_write;
@@ -527,7 +526,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
         inode->i_state |= I_SYNC;
         spin_unlock(&inode->i_lock);

-        ret = __writeback_single_inode(inode, wb, wbc);
+        ret = __writeback_single_inode(inode, wbc);

         spin_lock(&wb->list_lock);
         spin_lock(&inode->i_lock);
@@ -670,7 +669,7 @@ static long writeback_sb_inodes(struct super_block *sb,
                  * We use I_SYNC to pin the inode in memory. While it is set
                  * evict_inode() will wait so the inode cannot be freed.
                  */
-                __writeback_single_inode(inode, wb, &wbc);
+                __writeback_single_inode(inode, &wbc);

                 work->nr_pages -= write_chunk - wbc.nr_to_write;
                 wrote += write_chunk - wbc.nr_to_write;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index aba15f1b7ad2..78d2837bc940 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1379,6 +1379,7 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
         .close = fuse_vma_close,
         .fault = filemap_fault,
         .page_mkwrite = fuse_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 30e21997a1a1..0def0504afc1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -492,6 +492,7 @@ out:
 static const struct vm_operations_struct gfs2_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = gfs2_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 /**
@@ -526,7 +527,6 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
                 return error;
         }
         vma->vm_ops = &gfs2_vm_ops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;

         return 0;
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9460120a5170..c5bc355d8243 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
          * way when do_mmap_pgoff unwinds (may be important on powerpc
          * and ia64).
          */
-        vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
         vma->vm_ops = &hugetlb_vm_ops;

         if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
@@ -397,17 +397,16 @@ static void hugetlbfs_evict_inode(struct inode *inode)
 }

 static inline void
-hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
+hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
 {
         struct vm_area_struct *vma;
-        struct prio_tree_iter iter;

-        vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
+        vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
                 unsigned long v_offset;

                 /*
                  * Can the expression below overflow on 32-bit arches?
-                 * No, because the prio_tree returns us only those vmas
+                 * No, because the interval tree returns us only those vmas
                  * which overlap the truncated area starting at pgoff,
                  * and no vma on a 32-bit arch can span beyond the 4GB.
                  */
@@ -432,7 +431,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)

         i_size_write(inode, offset);
         mutex_lock(&mapping->i_mmap_mutex);
-        if (!prio_tree_empty(&mapping->i_mmap))
+        if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
         mutex_unlock(&mapping->i_mmap_mutex);
         truncate_hugepages(inode, offset);
diff --git a/fs/inode.c b/fs/inode.c
index ac8d904b3f16..b03c71957246 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -348,7 +348,7 @@ void address_space_init_once(struct address_space *mapping)
         mutex_init(&mapping->i_mmap_mutex);
         INIT_LIST_HEAD(&mapping->private_list);
         spin_lock_init(&mapping->private_lock);
-        INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+        mapping->i_mmap = RB_ROOT;
         INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
 }
 EXPORT_SYMBOL(address_space_init_once);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 1ea349fff68b..ae81b01e6fd7 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -394,8 +394,11 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
 }

 /* Trivial function to remove the last node in the tree. Which by definition
-   has no right-hand -- so can be removed just by making its only child (if
-   any) take its place under its parent. */
+   has no right-hand child — so can be removed just by making its left-hand
+   child (if any) take its place under its parent. Since this is only done
+   when we're consuming the whole tree, there's no need to use rb_erase()
+   and let it worry about adjusting colours and balancing the tree. That
+   would just be a waste of time. */
 static void eat_last(struct rb_root *root, struct rb_node *node)
 {
         struct rb_node *parent = rb_parent(node);
@@ -412,12 +415,12 @@ static void eat_last(struct rb_root *root, struct rb_node *node)
                 link = &parent->rb_right;

         *link = node->rb_left;
-        /* Colour doesn't matter now. Only the parent pointer. */
         if (node->rb_left)
-                node->rb_left->rb_parent_color = node->rb_parent_color;
+                node->rb_left->__rb_parent_color = node->__rb_parent_color;
 }

-/* We put this in reverse order, so we can just use eat_last */
+/* We put the version tree in reverse order, so we can use the same eat_last()
+   function that we use to consume the tmpnode tree (tn_root). */
 static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
 {
         struct rb_node **link = &ver_root->rb_node;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6a7fcab7ecb3..f692be97676d 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -578,6 +578,7 @@ out:
 static const struct vm_operations_struct nfs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = nfs_vm_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 static int nfs_need_sync_write(struct file *filp, struct inode *inode)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 5b387a4c293e..16f35f7423c5 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -135,13 +135,13 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 static const struct vm_operations_struct nilfs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = nilfs_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
         file_accessed(file);
         vma->vm_ops = &nilfs_file_vm_ops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;
         return 0;
 }

diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index d150372fd81d..47a87dda54ce 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -173,6 +173,7 @@ out:
 static const struct vm_operations_struct ocfs2_file_vm_ops = {
         .fault = ocfs2_fault,
         .page_mkwrite = ocfs2_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -188,7 +189,6 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
         ocfs2_inode_unlock(file->f_dentry->d_inode, lock_level);
 out:
         vma->vm_ops = &ocfs2_file_vm_ops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;
         return 0;
 }

diff --git a/fs/proc/base.c b/fs/proc/base.c
index d295af993677..ef5c84be66f9 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -873,111 +873,6 @@ static const struct file_operations proc_environ_operations = {
         .release = mem_release,
 };

-static ssize_t oom_adjust_read(struct file *file, char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-        char buffer[PROC_NUMBUF];
-        size_t len;
-        int oom_adjust = OOM_DISABLE;
-        unsigned long flags;
-
-        if (!task)
-                return -ESRCH;
-
-        if (lock_task_sighand(task, &flags)) {
-                oom_adjust = task->signal->oom_adj;
-                unlock_task_sighand(task, &flags);
-        }
-
-        put_task_struct(task);
-
-        len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
-
-        return simple_read_from_buffer(buf, count, ppos, buffer, len);
-}
-
-static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
-{
-        struct task_struct *task;
-        char buffer[PROC_NUMBUF];
-        int oom_adjust;
-        unsigned long flags;
-        int err;
-
-        memset(buffer, 0, sizeof(buffer));
-        if (count > sizeof(buffer) - 1)
-                count = sizeof(buffer) - 1;
-        if (copy_from_user(buffer, buf, count)) {
-                err = -EFAULT;
-                goto out;
-        }
-
-        err = kstrtoint(strstrip(buffer), 0, &oom_adjust);
-        if (err)
-                goto out;
-        if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
-             oom_adjust != OOM_DISABLE) {
-                err = -EINVAL;
-                goto out;
-        }
-
-        task = get_proc_task(file->f_path.dentry->d_inode);
-        if (!task) {
-                err = -ESRCH;
-                goto out;
-        }
-
-        task_lock(task);
-        if (!task->mm) {
-                err = -EINVAL;
-                goto err_task_lock;
-        }
-
-        if (!lock_task_sighand(task, &flags)) {
-                err = -ESRCH;
-                goto err_task_lock;
-        }
-
-        if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
-                err = -EACCES;
-                goto err_sighand;
-        }
-
-        /*
-         * Warn that /proc/pid/oom_adj is deprecated, see
-         * Documentation/feature-removal-schedule.txt.
-         */
-        printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
-                    current->comm, task_pid_nr(current), task_pid_nr(task),
-                    task_pid_nr(task));
-        task->signal->oom_adj = oom_adjust;
-        /*
-         * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
-         * value is always attainable.
-         */
-        if (task->signal->oom_adj == OOM_ADJUST_MAX)
-                task->signal->oom_score_adj = OOM_SCORE_ADJ_MAX;
-        else
-                task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
-                                              -OOM_DISABLE;
-        trace_oom_score_adj_update(task);
-err_sighand:
-        unlock_task_sighand(task, &flags);
-err_task_lock:
-        task_unlock(task);
-        put_task_struct(task);
-out:
-        return err < 0 ? err : count;
-}
-
-static const struct file_operations proc_oom_adjust_operations = {
-        .read           = oom_adjust_read,
-        .write          = oom_adjust_write,
-        .llseek         = generic_file_llseek,
-};
-
 static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
                                   size_t count, loff_t *ppos)
 {
@@ -1051,15 +946,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
         if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
                 task->signal->oom_score_adj_min = oom_score_adj;
         trace_oom_score_adj_update(task);
-        /*
-         * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
-         * always attainable.
-         */
-        if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-                task->signal->oom_adj = OOM_DISABLE;
-        else
-                task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
-                        OOM_SCORE_ADJ_MAX;
+
 err_sighand:
         unlock_task_sighand(task, &flags);
 err_task_lock:
@@ -2710,7 +2597,6 @@ static const struct pid_entry tgid_base_stuff[] = {
         REG("cgroup", S_IRUGO, proc_cgroup_operations),
 #endif
         INF("oom_score", S_IRUGO, proc_oom_score),
-        REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
         REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
         REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
@@ -3077,7 +2963,6 @@ static const struct pid_entry tid_base_stuff[] = {
         REG("cgroup", S_IRUGO, proc_cgroup_operations),
 #endif
         INF("oom_score", S_IRUGO, proc_oom_score),
-        REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
         REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
         REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 7fcd0d60a968..b8730d9ebaee 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -115,7 +115,13 @@ u64 stable_page_flags(struct page *page)
                 u |= 1 << KPF_COMPOUND_TAIL;
         if (PageHuge(page))
                 u |= 1 << KPF_HUGE;
-        else if (PageTransCompound(page))
+        /*
+         * PageTransCompound can be true for non-huge compound pages (slab
+         * pages or pages allocated by drivers with __GFP_COMP) because it
+         * just checks PG_head/PG_tail, so we need to check PageLRU to make
+         * sure a given page is a thp, not a non-huge compound page.
+         */
+        else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
                 u |= 1 << KPF_THP;

         /*
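As a quick way to observe the KPF_THP bit that the hunk above fixes, here is a small illustrative userspace program (not part of this patch) that reads /proc/kpageflags for a single PFN; it assumes KPF_THP is bit 22, per include/linux/kernel-page-flags.h, and that the caller has the privileges needed to read the file (normally root).

/*
 * Illustrative only: print the KPF_THP bit for one PFN via /proc/kpageflags.
 * Assumes KPF_THP == 22 (include/linux/kernel-page-flags.h) and sufficient
 * privileges to read the file.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        uint64_t flags;
        unsigned long pfn;
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
                return 1;
        }
        pfn = strtoul(argv[1], NULL, 0);

        fd = open("/proc/kpageflags", O_RDONLY);
        if (fd < 0) {
                perror("open /proc/kpageflags");
                return 1;
        }
        /* Each PFN has one 64-bit flags word at offset pfn * 8. */
        if (pread(fd, &flags, sizeof(flags), (off_t)pfn * sizeof(flags)) !=
            (ssize_t)sizeof(flags)) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("pfn %lu: KPF_THP=%llu\n", pfn,
               (unsigned long long)((flags >> 22) & 1));
        close(fd);
        return 0;
}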
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index dcd56f84db7e..a781bdf06694 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -142,6 +142,7 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
         }

         rb_link_node(node, parent, p);
+        rb_insert_color(node, &head->parent->root);
         return 0;
 }

@@ -168,10 +169,8 @@ static void init_header(struct ctl_table_header *head,
         head->node = node;
         if (node) {
                 struct ctl_table *entry;
-                for (entry = table; entry->procname; entry++, node++) {
-                        rb_init_node(&node->node);
+                for (entry = table; entry->procname; entry++, node++)
                         node->header = head;
-                }
         }
 }

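The rb_insert_color() call added to insert_entry() above completes the kernel's usual two-step rbtree insertion: rb_link_node() hooks the new node in at the position found by the search loop, and rb_insert_color() then recolours and rebalances the tree. A minimal kernel-side sketch of that pattern follows; struct demo_node and its key field are hypothetical and used only to show the API.

/*
 * Illustrative kernel-side sketch of the rb_link_node()/rb_insert_color()
 * pattern used by insert_entry() above.  "struct demo_node" and "key" are
 * hypothetical and not part of this patch.
 */
#include <linux/rbtree.h>

struct demo_node {
        struct rb_node node;
        unsigned long key;
};

static void demo_insert(struct rb_root *root, struct demo_node *new)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        /* Walk down to the leaf position where the new key belongs. */
        while (*link) {
                struct demo_node *entry = rb_entry(*link, struct demo_node, node);

                parent = *link;
                if (new->key < entry->key)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&new->node, parent, link);   /* hook the node in... */
        rb_insert_color(&new->node, root);        /* ...then rebalance/recolour */
}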
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4540b8f76f16..79827ce03e3b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -54,7 +54,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                 "VmPTE:\t%8lu kB\n"
                 "VmSwap:\t%8lu kB\n",
                 hiwater_vm << (PAGE_SHIFT-10),
-                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+                total_vm << (PAGE_SHIFT-10),
                 mm->locked_vm << (PAGE_SHIFT-10),
                 mm->pinned_vm << (PAGE_SHIFT-10),
                 hiwater_rss << (PAGE_SHIFT-10),
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index ff48c5a85309..5bc77817f382 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1536,6 +1536,7 @@ out_unlock:
 static const struct vm_operations_struct ubifs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = ubifs_vm_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };

 static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1eaeb8be3aae..aa473fa640a2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -940,7 +940,6 @@ xfs_file_mmap(
         struct vm_area_struct *vma)
 {
         vma->vm_ops = &xfs_file_vm_ops;
-        vma->vm_flags |= VM_CAN_NONLINEAR;

         file_accessed(filp);
         return 0;
@@ -1443,4 +1442,5 @@ const struct file_operations xfs_dir_file_operations = {
 static const struct vm_operations_struct xfs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = xfs_vm_page_mkwrite,
+        .remap_pages = generic_file_remap_pages,
 };