author		Al Viro <viro@zeniv.linux.org.uk>	2014-02-05 10:40:15 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-04-01 23:19:26 -0400
commit		1291afc18115885cf3ff265a5dc836393060e820 (patch)
tree		ac1a0c036a2711ab4fd896d706bb0b13deebc6d3 /mm/process_vm_access.c
parent		12e3004e464f554e0d57ddd0b4185060275e1f12 (diff)
untangling process_vm_..., part 4
Instead of passing vector size (by value) and index (by reference), pass the number of elements remaining. That's all we care about in these functions by that point.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
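To see the interface change in isolation: the old helpers took the total iovec count by value plus a cursor they advanced, while the reworked ones take only a count of elements still remaining and decrement it as they consume iovecs. The following is a minimal user-space sketch of that difference, not the kernel code itself; the helper names skip_empty_old() and skip_empty_new() are invented for illustration.

#include <sys/uio.h>

/* Old shape: total count passed by value, current index by reference. */
static void skip_empty_old(const struct iovec **iov, unsigned long cnt,
			   unsigned long *cur)
{
	/* Skip zero-length iovecs while tracking position against the total. */
	while (*cur < cnt && (*iov)->iov_len == 0) {
		(*iov)++;
		(*cur)++;
	}
}

/* New shape: only the number of remaining elements, counted down in place. */
static void skip_empty_new(const struct iovec **iov, unsigned long *left)
{
	/* Same loop; the termination test collapses to "anything left?". */
	while (*left && (*iov)->iov_len == 0) {
		(*iov)++;
		(*left)--;
	}
}

With this shape the caller initializes the counter once (left = liovcnt in process_vm_rw_core below), and every "*lvec_current < lvec_cnt" comparison inside the helpers reduces to a plain truth test on *left, which is exactly what the hunks below do.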
Diffstat (limited to 'mm/process_vm_access.c')
-rw-r--r--	mm/process_vm_access.c	29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 40dfb396e8ab..4bbd495fcedd 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -46,8 +46,7 @@ static int process_vm_rw_pages(struct task_struct *task,
 				unsigned long start_offset,
 				unsigned long len,
 				const struct iovec **iovp,
-				unsigned long lvec_cnt,
-				unsigned long *lvec_current,
+				unsigned long *left,
 				size_t *lvec_offset,
 				int vm_write,
 				unsigned int nr_pages_to_copy,
@@ -78,15 +77,14 @@ static int process_vm_rw_pages(struct task_struct *task,
 
 	/* Do the copy for each page */
 	for (pgs_copied = 0;
-	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
+	     (pgs_copied < nr_pages_to_copy) && *left;
 	     pgs_copied++) {
 		/* Make sure we have a non zero length iovec */
-		while (*lvec_current < lvec_cnt
-		       && iov->iov_len == 0) {
+		while (*left && iov->iov_len == 0) {
 			iov++;
-			(*lvec_current)++;
+			(*left)--;
 		}
-		if (*lvec_current == lvec_cnt)
+		if (!*left)
 			break;
 
 		/*
@@ -125,7 +123,7 @@ static int process_vm_rw_pages(struct task_struct *task,
 			 * Need to copy remaining part of page into the
 			 * next iovec if there are any bytes left in page
 			 */
-			(*lvec_current)++;
+			(*left)--;
 			iov++;
 			*lvec_offset = 0;
 			start_offset = (start_offset + bytes_to_copy)
@@ -175,8 +173,7 @@ end:
 static int process_vm_rw_single_vec(unsigned long addr,
 				    unsigned long len,
 				    const struct iovec **iovp,
-				    unsigned long lvec_cnt,
-				    unsigned long *lvec_current,
+				    unsigned long *left,
 				    size_t *lvec_offset,
 				    struct page **process_pages,
 				    struct mm_struct *mm,
@@ -201,14 +198,14 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
-	while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
+	while ((nr_pages_copied < nr_pages) && *left) {
 		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
 				       max_pages_per_loop);
 
 		rc = process_vm_rw_pages(task, mm, process_pages, pa,
 					 start_offset, len,
-					 iovp, lvec_cnt,
-					 lvec_current, lvec_offset,
+					 iovp, left,
+					 lvec_offset,
 					 vm_write, nr_pages_to_copy,
 					 &bytes_copied_loop);
 		start_offset = 0;
@@ -259,7 +256,7 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 	ssize_t bytes_copied = 0;
 	unsigned long nr_pages = 0;
 	unsigned long nr_pages_iov;
-	unsigned long iov_l_curr_idx = 0;
+	unsigned long left = liovcnt;
 	size_t iov_l_curr_offset = 0;
 	ssize_t iov_len;
 
@@ -315,10 +312,10 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto put_task_struct;
 	}
 
-	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
+	for (i = 0; i < riovcnt && left; i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-			&lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
+			&lvec, &left, &iov_l_curr_offset,
 			process_pages, mm, task, vm_write, &bytes_copied_loop);
 		bytes_copied += bytes_copied_loop;
 		if (rc != 0) {