summaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
authorMikulas Patocka <mpatocka@redhat.com>2016-07-28 18:48:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 19:07:41 -0400
commit3fa6c507319c897598512da91c010a4ad2ed682c (patch)
treec7746f5b4cca6489f11030db7e19f21de947ed04 /lib
parent7e4411bfe6dd29713f879717b433304a1992526d (diff)
mm: optimize copy_page_to/from_iter_iovec
copy_page_to_iter_iovec() and copy_page_from_iter_iovec() copy some data to userspace or from userspace. These functions have a fast path where they map a page using kmap_atomic and a slow path where they use kmap. kmap is slower than kmap_atomic, so the fast path is preferred. However, on kernels without highmem support, kmap just calls page_address, so there is no need to avoid kmap. On kernels without highmem support, the fast path just increases code size (and cache footprint) and it doesn't improve copy performance in any way. This patch enables the fast path only if CONFIG_HIGHMEM is defined. Code size reduced by this patch:
  x86 (without highmem)   928
  x86-64                  960
  sparc64                 848
  alpha                  1136
  pa-risc                1200
[akpm@linux-foundation.org: use IS_ENABLED(), per Andi] Link: http://lkml.kernel.org/r/alpine.LRH.2.02.1607221711410.4818@file01.intranet.prod.int.rdu2.redhat.com Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Mel Gorman <mgorman@suse.de> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Andi Kleen <andi@firstfloor.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r--lib/iov_iter.c8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index d67c8288d95d..9e8c7386b3a0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -144,7 +144,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
144 buf = iov->iov_base + skip; 144 buf = iov->iov_base + skip;
145 copy = min(bytes, iov->iov_len - skip); 145 copy = min(bytes, iov->iov_len - skip);
146 146
147 if (!fault_in_pages_writeable(buf, copy)) { 147 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
148 kaddr = kmap_atomic(page); 148 kaddr = kmap_atomic(page);
149 from = kaddr + offset; 149 from = kaddr + offset;
150 150
@@ -175,6 +175,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
175 copy = min(bytes, iov->iov_len - skip); 175 copy = min(bytes, iov->iov_len - skip);
176 } 176 }
177 /* Too bad - revert to non-atomic kmap */ 177 /* Too bad - revert to non-atomic kmap */
178
178 kaddr = kmap(page); 179 kaddr = kmap(page);
179 from = kaddr + offset; 180 from = kaddr + offset;
180 left = __copy_to_user(buf, from, copy); 181 left = __copy_to_user(buf, from, copy);
@@ -193,6 +194,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
193 bytes -= copy; 194 bytes -= copy;
194 } 195 }
195 kunmap(page); 196 kunmap(page);
197
196done: 198done:
197 if (skip == iov->iov_len) { 199 if (skip == iov->iov_len) {
198 iov++; 200 iov++;
@@ -225,7 +227,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
225 buf = iov->iov_base + skip; 227 buf = iov->iov_base + skip;
226 copy = min(bytes, iov->iov_len - skip); 228 copy = min(bytes, iov->iov_len - skip);
227 229
228 if (!fault_in_pages_readable(buf, copy)) { 230 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
229 kaddr = kmap_atomic(page); 231 kaddr = kmap_atomic(page);
230 to = kaddr + offset; 232 to = kaddr + offset;
231 233
@@ -256,6 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
256 copy = min(bytes, iov->iov_len - skip); 258 copy = min(bytes, iov->iov_len - skip);
257 } 259 }
258 /* Too bad - revert to non-atomic kmap */ 260 /* Too bad - revert to non-atomic kmap */
261
259 kaddr = kmap(page); 262 kaddr = kmap(page);
260 to = kaddr + offset; 263 to = kaddr + offset;
261 left = __copy_from_user(to, buf, copy); 264 left = __copy_from_user(to, buf, copy);
@@ -274,6 +277,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
274 bytes -= copy; 277 bytes -= copy;
275 } 278 }
276 kunmap(page); 279 kunmap(page);
280
277done: 281done:
278 if (skip == iov->iov_len) { 282 if (skip == iov->iov_len) {
279 iov++; 283 iov++;