path: root/include/asm-mips/uaccess.h
author		Ralf Baechle <ralf@linux-mips.org>	2007-02-19 11:59:24 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2007-02-19 20:26:42 -0500
commit		e03b526932a9ae1ff20b47459c040f3c6407f625 (patch)
tree		6c1753fc5a0497621b05c7dae9d3d686503bc5d7 /include/asm-mips/uaccess.h
parent		269dd2b2526d046d8b43554ff27b486e2ddb3f08 (diff)
[MIPS] Fixup copy_from_user_inatomic
From the 01408c4939479ec46c15aa7ef6e2406be50eeeca log message:

The problem is that when we write to a file, the copy from userspace to
pagecache is first done with preemption disabled, so if the source address is
not immediately available the copy fails *and* *zeros* *the* *destination*.

This is a problem because a concurrent read (which admittedly is an odd thing
to do) might see zeros rather than what was there before the write, or what
was there after, or some mixture of the two (any of these being a reasonable
thing to see).

If the copy did fail, it will immediately be retried with preemption
re-enabled so any transient problem with accessing the source won't cause an
error.  The first copy therefore does not need to zero any uncopied bytes,
and doing so causes the problem.  It uses copy_from_user_atomic rather than
copy_from_user, so the simple expedient is to change copy_from_user_atomic to
*not* zero out bytes on failure.

< --- end cite --- >

This patch finally implements a solution, even if not a pretty one, by
duplicating the relevant part of __copy_user.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
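To make the behavioural difference concrete, here is a minimal, self-contained
user-space sketch (not kernel code; copy_zeroing, copy_nonzeroing and the
fault_at parameter are invented for illustration).  It contrasts a first-pass
copy that zeros the uncopied tail of the destination with one that leaves the
tail untouched, followed by the full retry the write path performs once faults
are allowed again.

#include <stdio.h>
#include <string.h>

/* Old behaviour: a partial copy zeros the rest of the destination. */
static size_t copy_zeroing(void *to, const void *from, size_t n, size_t fault_at)
{
	size_t copied = fault_at < n ? fault_at : n;

	memcpy(to, from, copied);
	memset((char *)to + copied, 0, n - copied);	/* clobbers old bytes */
	return n - copied;				/* bytes NOT copied */
}

/* Fixed "inatomic" behaviour: copy what we can, leave the tail alone. */
static size_t copy_nonzeroing(void *to, const void *from, size_t n, size_t fault_at)
{
	size_t copied = fault_at < n ? fault_at : n;

	memcpy(to, from, copied);
	return n - copied;
}

int main(void)
{
	char page[8] = "AAAAAAA";	/* data a concurrent reader may see */
	const char src[8] = "BBBBBBB";

	/* First pass "faults" after 3 bytes, as if the source page were absent. */
	copy_zeroing(page, src, 7, 3);
	printf("zeroing first pass:     reader sees \"%s\"\n", page);	/* "BBB" */

	memcpy(page, "AAAAAAA", 8);
	copy_nonzeroing(page, src, 7, 3);
	printf("non-zeroing first pass: reader sees \"%s\"\n", page);	/* "BBBAAAA" */

	/* Either way the caller retries the full copy with faults allowed. */
	memcpy(page, src, 8);
	printf("after retry:            reader sees \"%s\"\n", page);	/* "BBBBBBB" */
	return 0;
}

The non-zeroing first pass at worst exposes a mixture of old and new data,
which the cited log message calls a reasonable thing to see; the zeroing
variant exposes bytes that were never in the file at all.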
Diffstat (limited to 'include/asm-mips/uaccess.h')
-rw-r--r--	include/asm-mips/uaccess.h	51
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 3eff8d8fe28a..c62c20e7b5c6 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -435,8 +435,32 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define __copy_to_user_inatomic(to,from,n)				\
+({									\
+	void __user *__cu_to;						\
+	const void *__cu_from;						\
+	long __cu_len;							\
+									\
+	__cu_to = (to);							\
+	__cu_from = (from);						\
+	__cu_len = (n);							\
+	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);\
+	__cu_len;							\
+})
+
+#define __copy_from_user_inatomic(to,from,n)				\
+({									\
+	void *__cu_to;							\
+	const void __user *__cu_from;					\
+	long __cu_len;							\
+									\
+	__cu_to = (to);							\
+	__cu_from = (from);						\
+	__cu_len = (n);							\
+	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,\
+						     __cu_len);		\
+	__cu_len;							\
+})
 
 /*
  * copy_to_user: - Copy a block of data into user space.
@@ -490,6 +514,29 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 })
 
+#define __invoke_copy_from_user_inatomic(to,from,n)			\
+({									\
+	register void *__cu_to_r __asm__ ("$4");			\
+	register const void __user *__cu_from_r __asm__ ("$5");	\
+	register long __cu_len_r __asm__ ("$6");			\
+									\
+	__cu_to_r = (to);						\
+	__cu_from_r = (from);						\
+	__cu_len_r = (n);						\
+	__asm__ __volatile__(						\
+	".set\tnoreorder\n\t"						\
+	__MODULE_JAL(__copy_user_inatomic)				\
+	".set\tnoat\n\t"						\
+	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	".set\tat\n\t"							\
+	".set\treorder"							\
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
+	:								\
+	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",	\
+	  "memory");							\
+	__cu_len_r;							\
+})
+
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to: Destination address, in kernel space.
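For readers unfamiliar with the construct, the added macros rely on the GNU C
statement-expression extension: the whole ({ ... }) block is an expression
whose value is its last statement, here __cu_len.  The sketch below is a
hypothetical user-space reduction of that pattern (fake_copy and
demo_copy_inatomic are invented names, not kernel interfaces); the real
__invoke_copy_from_user_inatomic additionally binds its operands to the MIPS
argument registers $4-$6 before jumping to __copy_user_inatomic.

#include <stdio.h>
#include <string.h>

/* A made-up copy routine standing in for __invoke_copy_from_user_inatomic(). */
static long fake_copy(void *to, const void *from, long n)
{
	memcpy(to, from, n);
	return 0;			/* 0 bytes left uncopied */
}

/* Same shape as the new macros: evaluate the arguments once, call the
 * worker, and make the whole ({ ... }) expression yield __cu_len. */
#define demo_copy_inatomic(to, from, n)				\
({								\
	void *__cu_to = (to);					\
	const void *__cu_from = (from);				\
	long __cu_len = (n);					\
								\
	__cu_len = fake_copy(__cu_to, __cu_from, __cu_len);	\
	__cu_len;	/* value of the statement expression */	\
})

int main(void)
{
	char dst[16];
	long left = demo_copy_inatomic(dst, "hello", 6);

	printf("bytes not copied: %ld, dst = \"%s\"\n", left, dst);
	return 0;
}

Returning the residual count this way lets callers treat a non-zero result as
"retry with faults enabled", which is exactly how the zeroing-free first pass
described in the commit message is meant to be used.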