diff options
| author | Ingo Molnar <mingo@elte.hu> | 2009-02-25 02:22:20 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-02-25 04:20:05 -0500 |
| commit | 95108fa34a83ffd97e0af959e4b28d7c62008781 (patch) | |
| tree | 06577270f81166d67b9058be1c04812b1ccf1058 | |
| parent | 3255aa2eb636a508fc82a73fabbb8aaf2ff23c0f (diff) | |
x86: usercopy: check for total size when deciding non-temporal cutoff
Impact: make more types of copies non-temporal
This change makes the following simple fix a bit more sophisticated:

  30d697f ("x86: fix performance regression in write() syscall")

We now check the 'total' number of bytes written to decide whether
to copy in a cached or a non-temporal way.
This will, for example, cause the tail (modulo 4096 bytes) chunk
of a large write() to be non-temporal too — not just the page-sized
chunks.
Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | arch/x86/include/asm/uaccess_64.h | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a748253db0c9..dcaa0404cf7b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -198,7 +198,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 	 * non-temporal stores here. Smaller writes get handled
 	 * via regular __copy_from_user():
 	 */
-	if (likely(size >= PAGE_SIZE))
+	if (likely(total >= PAGE_SIZE))
 		return __copy_user_nocache(dst, src, size, 1);
 	else
 		return __copy_from_user(dst, src, size);
@@ -207,7 +207,7 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
 static inline int __copy_from_user_inatomic_nocache(void *dst,
 				const void __user *src, unsigned size, unsigned total)
 {
-	if (likely(size >= PAGE_SIZE))
+	if (likely(total >= PAGE_SIZE))
 		return __copy_user_nocache(dst, src, size, 0);
 	else
 		return __copy_from_user_inatomic(dst, src, size);
