Diffstat (limited to 'mm/process_vm_access.c')
-rw-r--r--	mm/process_vm_access.c	91
1 file changed, 23 insertions(+), 68 deletions(-)
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 1e3c81103b2c..70ec3156f45a 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -51,13 +51,11 @@ static int process_vm_rw_pages(struct task_struct *task,
 			      ssize_t *bytes_copied)
 {
 	int pages_pinned;
-	void *target_kaddr;
 	int pgs_copied = 0;
 	int j;
 	int ret;
 	ssize_t bytes_to_copy;
 	ssize_t rc = 0;
-	const struct iovec *iov = iter->iov;
 
 	*bytes_copied = 0;
 
@@ -75,77 +73,34 @@ static int process_vm_rw_pages(struct task_struct *task,
 
 	/* Do the copy for each page */
 	for (pgs_copied = 0;
-	     (pgs_copied < nr_pages_to_copy) && iter->nr_segs;
+	     (pgs_copied < nr_pages_to_copy) && iov_iter_count(iter);
 	     pgs_copied++) {
-		/* Make sure we have a non zero length iovec */
-		while (iter->nr_segs && iov->iov_len == 0) {
-			iov++;
-			iter->nr_segs--;
+		struct page *page = process_pages[pgs_copied];
+		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset, len);
+
+		if (vm_write) {
+			if (bytes_to_copy > iov_iter_count(iter))
+				bytes_to_copy = iov_iter_count(iter);
+			ret = iov_iter_copy_from_user(page,
+					iter, start_offset, bytes_to_copy);
+			iov_iter_advance(iter, ret);
+			set_page_dirty_lock(page);
+		} else {
+			ret = copy_page_to_iter(page, start_offset,
+						bytes_to_copy, iter);
 		}
-		if (!iter->nr_segs)
-			break;
-
-		/*
-		 * Will copy smallest of:
-		 * - bytes remaining in page
-		 * - bytes remaining in destination iovec
-		 */
-		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
-				      len - *bytes_copied);
-		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
-				      iov->iov_len
-				      - iter->iov_offset);
-
-		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
-
-		if (vm_write)
-			ret = copy_from_user(target_kaddr,
-					     iov->iov_base
-					     + iter->iov_offset,
-					     bytes_to_copy);
-		else
-			ret = copy_to_user(iov->iov_base
-					   + iter->iov_offset,
-					   target_kaddr, bytes_to_copy);
-		kunmap(process_pages[pgs_copied]);
-		if (ret) {
-			*bytes_copied += bytes_to_copy - ret;
-			pgs_copied++;
+		*bytes_copied += ret;
+		len -= ret;
+		if (ret < bytes_to_copy && iov_iter_count(iter)) {
 			rc = -EFAULT;
-			goto end;
-		}
-		*bytes_copied += bytes_to_copy;
-		iter->iov_offset += bytes_to_copy;
-		if (iter->iov_offset == iov->iov_len) {
-			/*
-			 * Need to copy remaining part of page into the
-			 * next iovec if there are any bytes left in page
-			 */
-			iter->nr_segs--;
-			iov++;
-			iter->iov_offset = 0;
-			start_offset = (start_offset + bytes_to_copy)
-				       % PAGE_SIZE;
-			if (start_offset)
-				pgs_copied--;
-		} else {
-			start_offset = 0;
+			break;
 		}
+		start_offset = 0;
 	}
 
 end:
-	if (vm_write) {
-		for (j = 0; j < pages_pinned; j++) {
-			if (j < pgs_copied)
-				set_page_dirty_lock(process_pages[j]);
-			put_page(process_pages[j]);
-		}
-	} else {
-		for (j = 0; j < pages_pinned; j++)
-			put_page(process_pages[j]);
-	}
-
-	iter->iov = iov;
+	for (j = 0; j < pages_pinned; j++)
+		put_page(process_pages[j]);
 	return rc;
 }
 
@@ -194,7 +149,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
-	while ((nr_pages_copied < nr_pages) && iter->nr_segs) {
+	while ((nr_pages_copied < nr_pages) && iov_iter_count(iter)) {
 		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
 				       max_pages_per_loop);
 
@@ -303,7 +258,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
 		goto put_task_struct;
 	}
 
-	for (i = 0; i < riovcnt && iter->nr_segs; i++) {
+	for (i = 0; i < riovcnt && iov_iter_count(iter); i++) {
 		rc = process_vm_rw_single_vec(
 			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
 			iter, process_pages, mm, task, vm_write,
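On the write side, the patch pairs iov_iter_copy_from_user() with an explicit iov_iter_advance(), which suggests that helper does not advance the iterator itself, and it calls set_page_dirty_lock() as soon as a page has been written instead of deferring to the cleanup loop. A hedged sketch of that pattern, again with an illustrative helper name; later kernels provide copy_page_from_iter() for this job.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

/*
 * Hypothetical mirror of the vm_write path above: fill pinned pages
 * from a user iov_iter.  Helper name and parameters are illustrative.
 */
static ssize_t copy_pages_from_iter(struct page **pages, int nr_pages,
				    size_t start_offset, size_t len,
				    struct iov_iter *iter)
{
	ssize_t copied = 0;
	int i;

	for (i = 0; i < nr_pages && iov_iter_count(iter); i++) {
		size_t chunk = min_t(size_t, PAGE_SIZE - start_offset, len);
		size_t ret;

		/* Never ask for more than the iterator still holds. */
		if (chunk > iov_iter_count(iter))
			chunk = iov_iter_count(iter);
		ret = iov_iter_copy_from_user(pages[i], iter,
					      start_offset, chunk);
		iov_iter_advance(iter, ret);	/* helper does not advance */
		set_page_dirty_lock(pages[i]);	/* page contents changed */

		copied += ret;
		len -= ret;
		if (ret < chunk && iov_iter_count(iter))
			return copied ? copied : -EFAULT;
		start_offset = 0;
	}
	return copied;
}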