author		Al Viro <viro@zeniv.linux.org.uk>	2014-02-05 11:51:53 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-04-01 23:19:27 -0400
commit		9f78bdfabf0de0bc8d3cc97d06908651b577a567 (patch)
tree		0f663266621de5528cba80d2f68d979dcae5ee62 /mm/process_vm_access.c
parent		1291afc18115885cf3ff265a5dc836393060e820 (diff)
process_vm_access: switch to iov_iter
instead of keeping its pieces in separate variables and passing
pointers to all of them...
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
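
To make the motivation concrete, here is a minimal user-space sketch of the pattern this patch adopts. It is an illustration only, not the kernel's struct iov_iter or its API; the names my_iter and my_iter_copy_to are hypothetical. The point is that the state the old code threaded through three separate out-parameters (the current iovec pointer, the count of remaining segments, and the offset into the current segment) now travels as one iterator object that the callee advances in place.

/*
 * Illustrative sketch only (plain C, user space): bundle the iovec-walk
 * state into one iterator instead of passing iov/left/offset separately.
 * my_iter and my_iter_copy_to are hypothetical names, not kernel interfaces.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct my_iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments remaining */
	size_t iov_offset;		/* offset into the current segment */
};

/* Copy up to len bytes out of the iterator into buf, advancing its state. */
static size_t my_iter_copy_to(struct my_iter *it, char *buf, size_t len)
{
	size_t copied = 0;

	while (len && it->nr_segs) {
		size_t chunk = it->iov->iov_len - it->iov_offset;

		if (chunk > len)
			chunk = len;
		memcpy(buf + copied,
		       (const char *)it->iov->iov_base + it->iov_offset, chunk);
		copied += chunk;
		len -= chunk;
		it->iov_offset += chunk;
		if (it->iov_offset == it->iov->iov_len) {
			/* Segment exhausted: step to the next one. */
			it->iov++;
			it->nr_segs--;
			it->iov_offset = 0;
		}
	}
	return copied;
}

int main(void)
{
	char a[] = "hello ", b[] = "world";
	struct iovec vec[2] = { { a, 6 }, { b, 5 } };
	struct my_iter it = { vec, 2, 0 };
	char out[12] = { 0 };

	my_iter_copy_to(&it, out, sizeof(out) - 1);
	printf("%s\n", out);		/* prints "hello world" */
	return 0;
}

Compare this with the hunks below: process_vm_rw_pages() now advances iter->iov, iter->nr_segs and iter->iov_offset in the same way, and its callers only hand down a single struct iov_iter *.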
Diffstat (limited to 'mm/process_vm_access.c')
-rw-r--r--	mm/process_vm_access.c	62
1 file changed, 28 insertions, 34 deletions
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 4bbd495fcedd..1e3c81103b2c 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -45,9 +45,7 @@ static int process_vm_rw_pages(struct task_struct *task,
 				unsigned long pa,
 				unsigned long start_offset,
 				unsigned long len,
-				const struct iovec **iovp,
-				unsigned long *left,
-				size_t *lvec_offset,
+				struct iov_iter *iter,
 				int vm_write,
 				unsigned int nr_pages_to_copy,
 				ssize_t *bytes_copied)
@@ -59,7 +57,7 @@ static int process_vm_rw_pages(struct task_struct *task,
 	int ret;
 	ssize_t bytes_to_copy;
 	ssize_t rc = 0;
-	const struct iovec *iov = *iovp;
+	const struct iovec *iov = iter->iov;
 
 	*bytes_copied = 0;
 
@@ -77,14 +75,14 @@ static int process_vm_rw_pages(struct task_struct *task,
 
 	/* Do the copy for each page */
 	for (pgs_copied = 0;
-	     (pgs_copied < nr_pages_to_copy) && *left;
+	     (pgs_copied < nr_pages_to_copy) && iter->nr_segs;
 	     pgs_copied++) {
 		/* Make sure we have a non zero length iovec */
-		while (*left && iov->iov_len == 0) {
+		while (iter->nr_segs && iov->iov_len == 0) {
 			iov++;
-			(*left)--;
+			iter->nr_segs--;
 		}
-		if (!*left)
+		if (!iter->nr_segs)
 			break;
 
 		/*
@@ -96,18 +94,18 @@ static int process_vm_rw_pages(struct task_struct *task,
 					      len - *bytes_copied);
 		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
 				      iov->iov_len
-				      - *lvec_offset);
+				      - iter->iov_offset);
 
 		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
 
 		if (vm_write)
 			ret = copy_from_user(target_kaddr,
 					     iov->iov_base
-					     + *lvec_offset,
+					     + iter->iov_offset,
 					     bytes_to_copy);
 		else
 			ret = copy_to_user(iov->iov_base
-					   + *lvec_offset,
+					   + iter->iov_offset,
 					   target_kaddr, bytes_to_copy);
 		kunmap(process_pages[pgs_copied]);
 		if (ret) {
@@ -117,15 +115,15 @@ static int process_vm_rw_pages(struct task_struct *task,
 			goto end;
 		}
 		*bytes_copied += bytes_to_copy;
-		*lvec_offset += bytes_to_copy;
-		if (*lvec_offset == iov->iov_len) {
+		iter->iov_offset += bytes_to_copy;
+		if (iter->iov_offset == iov->iov_len) {
 			/*
 			 * Need to copy remaining part of page into the
 			 * next iovec if there are any bytes left in page
 			 */
-			(*left)--;
+			iter->nr_segs--;
 			iov++;
-			*lvec_offset = 0;
+			iter->iov_offset = 0;
 			start_offset = (start_offset + bytes_to_copy)
 				% PAGE_SIZE;
 			if (start_offset)
@@ -147,7 +145,7 @@ end:
 		put_page(process_pages[j]);
 	}
 
-	*iovp = iov;
+	iter->iov = iov;
 	return rc;
 }
 
@@ -172,9 +170,7 @@ end:
  */
 static int process_vm_rw_single_vec(unsigned long addr,
 				    unsigned long len,
-				    const struct iovec **iovp,
-				    unsigned long *left,
-				    size_t *lvec_offset,
+				    struct iov_iter *iter,
 				    struct page **process_pages,
 				    struct mm_struct *mm,
 				    struct task_struct *task,
@@ -198,14 +194,12 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
-	while ((nr_pages_copied < nr_pages) && *left) {
+	while ((nr_pages_copied < nr_pages) && iter->nr_segs) {
 		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
 				       max_pages_per_loop);
 
 		rc = process_vm_rw_pages(task, mm, process_pages, pa,
-					 start_offset, len,
-					 iovp, left,
-					 lvec_offset,
+					 start_offset, len, iter,
 					 vm_write, nr_pages_to_copy,
 					 &bytes_copied_loop);
 		start_offset = 0;
@@ -240,8 +234,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
  * return less bytes than expected if an error occurs during the copying
  * process.
  */
-static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
-				  unsigned long liovcnt,
+static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
 				  const struct iovec *rvec,
 				  unsigned long riovcnt,
 				  unsigned long flags, int vm_write)
@@ -256,8 +249,6 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 	ssize_t bytes_copied = 0;
 	unsigned long nr_pages = 0;
 	unsigned long nr_pages_iov;
-	unsigned long left = liovcnt;
-	size_t iov_l_curr_offset = 0;
 	ssize_t iov_len;
 
 	/*
@@ -312,11 +303,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
 		goto put_task_struct;
 	}
 
-	for (i = 0; i < riovcnt && left; i++) {
+	for (i = 0; i < riovcnt && iter->nr_segs; i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-			&lvec, &left, &iov_l_curr_offset,
-			process_pages, mm, task, vm_write, &bytes_copied_loop);
+			iter, process_pages, mm, task, vm_write,
+			&bytes_copied_loop);
 		bytes_copied += bytes_copied_loop;
 		if (rc != 0) {
 			/* If we have managed to copy any data at all then
@@ -365,6 +356,7 @@ static ssize_t process_vm_rw(pid_t pid,
 	struct iovec iovstack_r[UIO_FASTIOV];
 	struct iovec *iov_l = iovstack_l;
 	struct iovec *iov_r = iovstack_r;
+	struct iov_iter iter;
 	ssize_t rc;
 
 	if (flags != 0)
@@ -380,13 +372,14 @@ static ssize_t process_vm_rw(pid_t pid,
 	if (rc <= 0)
 		goto free_iovecs;
 
+	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
+
 	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
 				   iovstack_r, &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
-	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-			vm_write);
+	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
 	if (iov_r != iovstack_r)
@@ -426,6 +419,7 @@ compat_process_vm_rw(compat_pid_t pid,
 	struct iovec iovstack_r[UIO_FASTIOV];
 	struct iovec *iov_l = iovstack_l;
 	struct iovec *iov_r = iovstack_r;
+	struct iov_iter iter;
 	ssize_t rc = -EFAULT;
 
 	if (flags != 0)
@@ -441,14 +435,14 @@ compat_process_vm_rw(compat_pid_t pid,
 				  &iov_l);
 	if (rc <= 0)
 		goto free_iovecs;
+	iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
 	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
 					  UIO_FASTIOV, iovstack_r,
 					  &iov_r);
 	if (rc <= 0)
 		goto free_iovecs;
 
-	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-			vm_write);
+	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
 	if (iov_r != iovstack_r)