aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-19 21:19:19 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-19 21:19:19 -0500
commitecb5ec044ab99be1f35e93962fa43e4bb3120d9e (patch)
tree8832ba0b39783bd8a836b5552eb8f74d2e450afe /mm
parent298647e31af52e795867a399fa049cebd88067ff (diff)
parente3bb504efd919f7bacd24cb14038953899b909e1 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile #3 from Al Viro: "Assorted fixes and patches from the last cycle" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: [regression] chunk lost from bd9b51 vfs: make mounts and mountstats honor root dir like mountinfo does vfs: cleanup show_mountinfo init: fix read-write root mount unfuck binfmt_misc.c (broken by commit e6084d4) vm_area_operations: kill ->migrate() new helper: iter_is_iovec() move_extent_per_page(): get rid of unused w_flags lustre: get rid of playing with ->fs btrfs: filp_open() returns ERR_PTR() on failure, not NULL...
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/mempolicy.c5
-rw-r--r--mm/migrate.c21
-rw-r--r--mm/shmem.c2
4 files changed, 2 insertions, 28 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index e8905bc3cbd7..bd8543c6508f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2464,7 +2464,7 @@ ssize_t generic_perform_write(struct file *file,
 	/*
 	 * Copies from kernel address space cannot fail (NFSD is a big user).
 	 */
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (!iter_is_iovec(i))
 		flags |= AOP_FLAG_UNINTERRUPTIBLE;
 
 	do {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f22c55947181..0e0961b8c39c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1041,10 +1041,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 	down_read(&mm->mmap_sem);
 
-	err = migrate_vmas(mm, from, to, flags);
-	if (err)
-		goto out;
-
 	/*
 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
@@ -1124,7 +1120,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 		if (err < 0)
 			break;
 	}
-out:
 	up_read(&mm->mmap_sem);
 	if (err < 0)
 		return err;
diff --git a/mm/migrate.c b/mm/migrate.c
index b1d02127e1be..344cdf692fc8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1536,27 +1536,6 @@ out:
 	return err;
 }
 
-/*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
-int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
-	const nodemask_t *from, unsigned long flags)
-{
-	struct vm_area_struct *vma;
-	int err = 0;
-
-	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
-		if (vma->vm_ops && vma->vm_ops->migrate) {
-			err = vma->vm_ops->migrate(vma, to, from, flags);
-			if (err)
-				break;
-		}
-	}
-	return err;
-}
-
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Returns true if this is a safe migration target node for misplaced NUMA
diff --git a/mm/shmem.c b/mm/shmem.c
index 185836ba53ef..73ba1df7c8ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1536,7 +1536,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	 * holes of a sparse file, we actually need to allocate those pages,
 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
 	 */
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (!iter_is_iovec(to))
 		sgp = SGP_DIRTY;
 
 	index = *ppos >> PAGE_CACHE_SHIFT;