| author | Al Viro <viro@zeniv.linux.org.uk> | 2014-02-05 10:14:01 -0500 |
|---|---|---|
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2014-04-01 23:19:25 -0400 |
| commit | 480402e18def5514c9dc8cb04e3c0e7482ff2b86 (patch) | |
| tree | 905fa58d786565d9ee400102f2d1edfa1e0df1f5 /mm | |
| parent | ec69557982563c97b3a7d68dd271be5105b83869 (diff) | |
untangling process_vm_..., part 1
We want to massage it to use iov_iter. This one is an equivalent
transformation - just introduce a local variable mirroring
lvec + *lvec_current.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
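The transformation rests on a simple invariant: after `const struct iovec *iov = lvec + *lvec_current;`, the pointer `iov` and the index `*lvec_current` refer to the same element as long as they are advanced together, so every `lvec[*lvec_current].iov_len` / `.iov_base` access can be rewritten as `iov->iov_len` / `iov->iov_base` without changing behaviour. Below is a minimal userspace sketch of that invariant, not kernel code; the `walk()` helper and the test vectors are made up for illustration.

```c
/*
 * Illustration only: a local pointer mirroring an index into an iovec
 * array, kept in lockstep the way the patch keeps iov and *lvec_current.
 */
#include <assert.h>
#include <sys/uio.h>   /* struct iovec */

static void walk(const struct iovec *lvec, unsigned long lvec_cnt,
                 unsigned long *lvec_current)
{
	/* mirrors lvec + *lvec_current, as in the patch */
	const struct iovec *iov = lvec + *lvec_current;

	while (*lvec_current < lvec_cnt) {
		/* both spellings name the same element */
		assert(iov == &lvec[*lvec_current]);
		assert(iov->iov_len == lvec[*lvec_current].iov_len);

		/* advance pointer and index together */
		iov++;
		(*lvec_current)++;
	}
}

int main(void)
{
	char a[4], b[8];
	struct iovec vec[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	unsigned long cur = 0;

	walk(vec, 2, &cur);
	return 0;
}
```

Keeping the pointer and the index in lockstep is what makes this an equivalent, behaviour-preserving step ahead of the later iov_iter conversion.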
Diffstat (limited to 'mm')
-rw-r--r-- | mm/process_vm_access.c | 14 |
1 file changed, 9 insertions, 5 deletions
```diff
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index fd26d0433509..7b8d63e6c30b 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -60,6 +60,7 @@ static int process_vm_rw_pages(struct task_struct *task,
 	int ret;
 	ssize_t bytes_to_copy;
 	ssize_t rc = 0;
+	const struct iovec *iov = lvec + *lvec_current;
 
 	*bytes_copied = 0;
 
@@ -81,8 +82,10 @@ static int process_vm_rw_pages(struct task_struct *task,
 	     pgs_copied++) {
 		/* Make sure we have a non zero length iovec */
 		while (*lvec_current < lvec_cnt
-		       && lvec[*lvec_current].iov_len == 0)
+		       && iov->iov_len == 0) {
+			iov++;
 			(*lvec_current)++;
+		}
 		if (*lvec_current == lvec_cnt)
 			break;
 
@@ -94,18 +97,18 @@ static int process_vm_rw_pages(struct task_struct *task,
 		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
 				      len - *bytes_copied);
 		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
-				      lvec[*lvec_current].iov_len
+				      iov->iov_len
 				      - *lvec_offset);
 
 		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
 
 		if (vm_write)
 			ret = copy_from_user(target_kaddr,
-					     lvec[*lvec_current].iov_base
+					     iov->iov_base
 					     + *lvec_offset,
 					     bytes_to_copy);
 		else
-			ret = copy_to_user(lvec[*lvec_current].iov_base
+			ret = copy_to_user(iov->iov_base
 					   + *lvec_offset,
 					   target_kaddr, bytes_to_copy);
 		kunmap(process_pages[pgs_copied]);
@@ -117,12 +120,13 @@ static int process_vm_rw_pages(struct task_struct *task,
 		}
 		*bytes_copied += bytes_to_copy;
 		*lvec_offset += bytes_to_copy;
-		if (*lvec_offset == lvec[*lvec_current].iov_len) {
+		if (*lvec_offset == iov->iov_len) {
 			/*
 			 * Need to copy remaining part of page into the
 			 * next iovec if there are any bytes left in page
 			 */
 			(*lvec_current)++;
+			iov++;
 			*lvec_offset = 0;
 			start_offset = (start_offset + bytes_to_copy)
 				       % PAGE_SIZE;
```