author	Al Viro <viro@zeniv.linux.org.uk>	2014-02-05 13:15:28 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-04-01 23:19:29 -0400
commit	9acc1a0f9af244826de0eea2c38b78410eb0df6d (patch)
tree	850049bb9d86b0a88b39b92b9641b37ed7a26a3c /mm
parent	e21345f9c3f8a806604910cae886e471a860ab86 (diff)
process_vm_access: don't bother with returning the amounts of bytes copied
we can calculate that in the caller just fine, TYVM
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'mm')
-rw-r--r--	mm/process_vm_access.c	47
1 file changed, 16 insertions, 31 deletions
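The change is purely an accounting cleanup: rather than threading a bytes_copied out-parameter through process_vm_rw_pages() and process_vm_rw_single_vec(), the caller snapshots iov_iter_count() before the copy and subtracts whatever is left afterwards. Below is a minimal, self-contained userspace sketch of that before/after pattern; struct buf_iter, fill_from() and copy_all() are hypothetical stand-ins for the kernel's iov_iter and the process_vm_rw_* helpers, not real kernel APIs.

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical stand-in for struct iov_iter: a buffer plus bytes remaining. */
struct buf_iter {
	char   *p;
	size_t  remaining;		/* analogous to iov_iter_count(iter) */
};

/* Stand-in for the low-level copy step: may stop early (short copy). */
static int fill_from(struct buf_iter *it, const char *src, size_t len)
{
	size_t n = len < it->remaining ? len : it->remaining;

	memcpy(it->p, src, n);
	it->p += n;
	it->remaining -= n;
	return n < len ? -1 : 0;	/* report an error on a short copy */
}

/* Caller-side accounting: no bytes_copied out-parameter anywhere. */
static ssize_t copy_all(struct buf_iter *it, const char **srcs,
			const size_t *lens, size_t cnt)
{
	size_t before = it->remaining;	/* space before */
	ssize_t rc = 0;
	size_t i;

	for (i = 0; i < cnt && it->remaining && !rc; i++)
		rc = fill_from(it, srcs[i], lens[i]);

	/* copied = space before - space after */
	if (before - it->remaining)
		rc = before - it->remaining;
	return rc;
}

int main(void)
{
	char out[8];
	struct buf_iter it = { out, sizeof(out) };
	const char *srcs[] = { "hello ", "world!" };
	const size_t lens[] = { 6, 6 };

	/* Only 8 bytes fit, so the partial count (8) is returned, not -1. */
	printf("rc = %zd\n", copy_all(&it, srcs, lens, 2));
	return 0;
}

As in the patch, a short copy surfaces as an error only when nothing was transferred at all; otherwise the partial byte count wins.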
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 83252d783482..15e4c2f312e8 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -43,13 +43,10 @@ static int process_vm_rw_pages(struct page **pages,
 			       unsigned offset,
 			       size_t len,
 			       struct iov_iter *iter,
-			       int vm_write,
-			       ssize_t *bytes_copied)
+			       int vm_write)
 {
-	*bytes_copied = 0;
-
 	/* Do the copy for each page */
-	while (iov_iter_count(iter) && len) {
+	while (len && iov_iter_count(iter)) {
 		struct page *page = *pages++;
 		size_t copy = PAGE_SIZE - offset;
 		size_t copied;
@@ -67,7 +64,6 @@ static int process_vm_rw_pages(struct page **pages,
 		} else {
 			copied = copy_page_to_iter(page, offset, copy, iter);
 		}
-		*bytes_copied += copied;
 		len -= copied;
 		if (copied < copy && iov_iter_count(iter))
 			return -EFAULT;
@@ -101,20 +97,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
 				    struct page **process_pages,
 				    struct mm_struct *mm,
 				    struct task_struct *task,
-				    int vm_write,
-				    ssize_t *bytes_copied)
+				    int vm_write)
 {
 	unsigned long pa = addr & PAGE_MASK;
 	unsigned long start_offset = addr - pa;
 	unsigned long nr_pages;
-	ssize_t bytes_copied_loop;
 	ssize_t rc = 0;
 	unsigned long nr_pages_copied = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
 
-	*bytes_copied = 0;
-
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
@@ -143,11 +135,9 @@ static int process_vm_rw_single_vec(unsigned long addr,
 
 		rc = process_vm_rw_pages(process_pages,
 					 start_offset, n, iter,
-					 vm_write,
-					 &bytes_copied_loop);
+					 vm_write);
 		len -= n;
 		start_offset = 0;
-		*bytes_copied += bytes_copied_loop;
 		nr_pages_copied += pages_pinned;
 		pa += pages_pinned * PAGE_SIZE;
 		while (pages_pinned)
@@ -187,11 +177,10 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
 	struct mm_struct *mm;
 	unsigned long i;
 	ssize_t rc = 0;
-	ssize_t bytes_copied_loop;
-	ssize_t bytes_copied = 0;
 	unsigned long nr_pages = 0;
 	unsigned long nr_pages_iov;
 	ssize_t iov_len;
+	size_t total_len = iov_iter_count(iter);
 
 	/*
 	 * Work out how many pages of struct pages we're going to need
@@ -245,24 +234,20 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
 		goto put_task_struct;
 	}
 
-	for (i = 0; i < riovcnt && iov_iter_count(iter); i++) {
+	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-			iter, process_pages, mm, task, vm_write,
-			&bytes_copied_loop);
-		bytes_copied += bytes_copied_loop;
-		if (rc != 0) {
-			/* If we have managed to copy any data at all then
-			   we return the number of bytes copied. Otherwise
-			   we return the error code */
-			if (bytes_copied)
-				rc = bytes_copied;
-			goto put_mm;
-		}
-	}
+			iter, process_pages, mm, task, vm_write);
+
+	/* copied = space before - space after */
+	total_len -= iov_iter_count(iter);
+
+	/* If we have managed to copy any data at all then
+	   we return the number of bytes copied. Otherwise
+	   we return the error code */
+	if (total_len)
+		rc = total_len;
 
-	rc = bytes_copied;
-put_mm:
 	mmput(mm);
 
 put_task_struct:
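For completeness, this caller-side accounting is what backs the behaviour userspace sees through the syscalls this file implements: process_vm_readv() and process_vm_writev() return the number of bytes actually transferred, and a partially satisfied request comes back as a short count rather than an error. A usage sketch, assuming a placeholder target PID and remote address that you would obtain from the process being inspected:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

int main(void)
{
	pid_t target = 1234;			/* placeholder: PID of the remote process */
	void *remote_addr = (void *)0x400000;	/* placeholder: address in that process */
	char buf[4096];

	struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

	/* Returns the number of bytes actually copied; if only part of the
	 * request could be transferred, the short byte count is returned
	 * rather than an error. */
	ssize_t n = process_vm_readv(target, &local, 1, &remote, 1, 0);
	if (n < 0)
		perror("process_vm_readv");
	else
		printf("copied %zd bytes\n", n);
	return 0;
}

Reading another process's memory this way requires ptrace-level permission on the target (typically the same user, or CAP_SYS_PTRACE).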