about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
author Al Viro <viro@zeniv.linux.org.uk> 2014-02-05 12:44:24 -0500
committer Al Viro <viro@zeniv.linux.org.uk> 2014-04-01 23:19:28 -0400
commit 70eca12d80a8b51800b4ebfbc629e0ee34166779 (patch)
tree cbf6ed71bde79c7f7813e13ff7969587561c81a5 /mm
parent 240f3905f513cd5f27cc3f79bf03bf3f88323f41 (diff)
process_vm_access: take get_user_pages/put_pages one level up
... and trim the fuck out of process_vm_rw_pages() argument list. Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm')
-rw-r--r-- mm/process_vm_access.c 97
1 file changed, 39 insertions, 58 deletions
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 70ec3156f45a..c2a1916bacbf 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -39,69 +39,39 @@
39 * @bytes_copied: returns number of bytes successfully copied 39 * @bytes_copied: returns number of bytes successfully copied
40 * Returns 0 on success, error code otherwise 40 * Returns 0 on success, error code otherwise
41 */ 41 */
42static int process_vm_rw_pages(struct task_struct *task, 42static int process_vm_rw_pages(struct page **pages,
43 struct mm_struct *mm, 43 unsigned offset,
44 struct page **process_pages,
45 unsigned long pa,
46 unsigned long start_offset,
47 unsigned long len, 44 unsigned long len,
48 struct iov_iter *iter, 45 struct iov_iter *iter,
49 int vm_write, 46 int vm_write,
50 unsigned int nr_pages_to_copy, 47 unsigned int nr_pages_to_copy,
51 ssize_t *bytes_copied) 48 ssize_t *bytes_copied)
52{ 49{
53 int pages_pinned;
54 int pgs_copied = 0;
55 int j;
56 int ret;
57 ssize_t bytes_to_copy;
58 ssize_t rc = 0;
59
60 *bytes_copied = 0; 50 *bytes_copied = 0;
61 51
62 /* Get the pages we're interested in */
63 down_read(&mm->mmap_sem);
64 pages_pinned = get_user_pages(task, mm, pa,
65 nr_pages_to_copy,
66 vm_write, 0, process_pages, NULL);
67 up_read(&mm->mmap_sem);
68
69 if (pages_pinned != nr_pages_to_copy) {
70 rc = -EFAULT;
71 goto end;
72 }
73
74 /* Do the copy for each page */ 52 /* Do the copy for each page */
75 for (pgs_copied = 0; 53 while (iov_iter_count(iter) && nr_pages_to_copy--) {
76 (pgs_copied < nr_pages_to_copy) && iov_iter_count(iter); 54 struct page *page = *pages++;
77 pgs_copied++) { 55 size_t copy = min_t(ssize_t, PAGE_SIZE - offset, len);
78 struct page *page = process_pages[pgs_copied]; 56 size_t copied;
79 bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset, len);
80 57
81 if (vm_write) { 58 if (vm_write) {
82 if (bytes_to_copy > iov_iter_count(iter)) 59 if (copy > iov_iter_count(iter))
83 bytes_to_copy = iov_iter_count(iter); 60 copy = iov_iter_count(iter);
84 ret = iov_iter_copy_from_user(page, 61 copied = iov_iter_copy_from_user(page, iter,
85 iter, start_offset, bytes_to_copy); 62 offset, copy);
86 iov_iter_advance(iter, ret); 63 iov_iter_advance(iter, copied);
87 set_page_dirty_lock(page); 64 set_page_dirty_lock(page);
88 } else { 65 } else {
89 ret = copy_page_to_iter(page, start_offset, 66 copied = copy_page_to_iter(page, offset, copy, iter);
90 bytes_to_copy, iter);
91 }
92 *bytes_copied += ret;
93 len -= ret;
94 if (ret < bytes_to_copy && iov_iter_count(iter)) {
95 rc = -EFAULT;
96 break;
97 } 67 }
98 start_offset = 0; 68 *bytes_copied += copied;
69 len -= copied;
70 if (copied < copy && iov_iter_count(iter))
71 return -EFAULT;
72 offset = 0;
99 } 73 }
100 74 return 0;
101end:
102 for (j = 0; j < pages_pinned; j++)
103 put_page(process_pages[j]);
104 return rc;
105} 75}
106 76
107/* Maximum number of pages kmalloc'd to hold struct page's during copy */ 77/* Maximum number of pages kmalloc'd to hold struct page's during copy */
@@ -138,7 +108,6 @@ static int process_vm_rw_single_vec(unsigned long addr,
138 ssize_t bytes_copied_loop; 108 ssize_t bytes_copied_loop;
139 ssize_t rc = 0; 109 ssize_t rc = 0;
140 unsigned long nr_pages_copied = 0; 110 unsigned long nr_pages_copied = 0;
141 unsigned long nr_pages_to_copy;
142 unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES 111 unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
143 / sizeof(struct pages *); 112 / sizeof(struct pages *);
144 113
@@ -150,23 +119,35 @@ static int process_vm_rw_single_vec(unsigned long addr,
150 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; 119 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
151 120
152 while ((nr_pages_copied < nr_pages) && iov_iter_count(iter)) { 121 while ((nr_pages_copied < nr_pages) && iov_iter_count(iter)) {
122 int nr_pages_to_copy;
123 int pages_pinned;
153 nr_pages_to_copy = min(nr_pages - nr_pages_copied, 124 nr_pages_to_copy = min(nr_pages - nr_pages_copied,
154 max_pages_per_loop); 125 max_pages_per_loop);
155 126
156 rc = process_vm_rw_pages(task, mm, process_pages, pa, 127 /* Get the pages we're interested in */
128 down_read(&mm->mmap_sem);
129 pages_pinned = get_user_pages(task, mm, pa,
130 nr_pages_to_copy,
131 vm_write, 0, process_pages, NULL);
132 up_read(&mm->mmap_sem);
133
134 if (pages_pinned <= 0)
135 return -EFAULT;
136
137 rc = process_vm_rw_pages(process_pages,
157 start_offset, len, iter, 138 start_offset, len, iter,
158 vm_write, nr_pages_to_copy, 139 vm_write, pages_pinned,
159 &bytes_copied_loop); 140 &bytes_copied_loop);
160 start_offset = 0; 141 start_offset = 0;
161 *bytes_copied += bytes_copied_loop; 142 *bytes_copied += bytes_copied_loop;
143 len -= bytes_copied_loop;
144 nr_pages_copied += pages_pinned;
145 pa += pages_pinned * PAGE_SIZE;
146 while (pages_pinned)
147 put_page(process_pages[--pages_pinned]);
162 148
163 if (rc < 0) { 149 if (rc < 0)
164 return rc; 150 break;
165 } else {
166 len -= bytes_copied_loop;
167 nr_pages_copied += nr_pages_to_copy;
168 pa += nr_pages_to_copy * PAGE_SIZE;
169 }
170 } 151 }
171 152
172 return rc; 153 return rc;