author    Markos Chandras <markos.chandras@imgtec.com>    2013-12-11 11:47:10 -0500
committer Ralf Baechle <ralf@linux-mips.org>    2014-03-26 18:09:16 -0400
commit    05c6516005c47c3b11582eec2137908ce1afe316 (patch)
tree      64058c7570e90ffd1c59393a51fda5d6894771aa
parent    0081ad2486ed75cf09b99e4bf997c513567f5b6d (diff)
MIPS: asm: uaccess: Add EVA support to copy_{in, to,from}_user
Use the EVA specific functions from memcpy.S to perform userspace
operations. When get_fs() == get_ds(), the usual load/store instructions
are used, because the destination address is located in the kernel
address space region. Otherwise, the EVA specific load/store
instructions are used, which go through the TLB to perform the
virtual-to-physical translation for the userspace address.

Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
-rw-r--r--    arch/mips/include/asm/uaccess.h    191
1 file changed, 171 insertions(+), 20 deletions(-)
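Illustration only (not part of the commit): the dispatch pattern this patch
adds to every copy macro, written out as a plain C helper against the
pre-4.10 uaccess API (get_fs()/get_ds()/segment_eq()). The helpers
copy_to_kernel_raw() and copy_to_user_raw() are hypothetical stand-ins for
the real __invoke_copy_to_{kernel,user} macros.

/*
 * Minimal sketch, assuming the <asm/uaccess.h> definitions of the era.
 * copy_to_kernel_raw()/copy_to_user_raw() are hypothetical names.
 */
static inline size_t copy_dispatch(void __user *to, const void *from,
				   size_t n)
{
	if (segment_eq(get_fs(), get_ds()))
		/* KERNEL_DS: "to" is a kernel address; plain LW/SW copy */
		return copy_to_kernel_raw(to, from, n);
	/* USER_DS: "to" is a user address; EVA LWE/SWE copy via the TLB */
	return copy_to_user_raw(to, from, n);
}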
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index f3624b73050f..98f4a7934529 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -781,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 
+#ifndef CONFIG_EVA
 #define __invoke_copy_to_user(to, from, n)				\
 ({									\
 	register void __user *__cu_to_r __asm__("$4");			\
@@ -799,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 })
 
+#define __invoke_copy_to_kernel(to, from, n)				\
+	__invoke_copy_to_user(to, from, n)
+
+#endif
+
 /*
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to:	  Destination address, in user space.
@@ -823,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_from = (from);						\
 	__cu_len = (n);							\
 	might_fault();							\
-	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
+	if (segment_eq(get_fs(), get_ds()))				\
+		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
+						   __cu_len);		\
+	else								\
+		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
+						 __cu_len);		\
 	__cu_len;							\
 })
 
@@ -838,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
+	if (segment_eq(get_fs(), get_ds()))				\
+		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
+						   __cu_len);		\
+	else								\
+		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
+						 __cu_len);		\
 	__cu_len;							\
 })
 
@@ -851,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,	\
-						    __cu_len);		\
+	if (segment_eq(get_fs(), get_ds()))				\
+		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
+							      __cu_from,\
+							      __cu_len);\
+	else								\
+		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
+							    __cu_from,	\
+							    __cu_len);	\
 	__cu_len;							\
 })
 
@@ -878,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\
-		might_fault();						\
-		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
-						 __cu_len);		\
+	if (segment_eq(get_fs(), get_ds())) {				\
+		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
+						   __cu_from,		\
+						   __cu_len);		\
+	} else {							\
+		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {	\
+			might_fault();					\
+			__cu_len = __invoke_copy_to_user(__cu_to,	\
+							 __cu_from,	\
+							 __cu_len);	\
+		}							\
 	}								\
 	__cu_len;							\
 })
 
+#ifndef CONFIG_EVA
+
 #define __invoke_copy_from_user(to, from, n)				\
 ({									\
 	register void *__cu_to_r __asm__("$4");				\
@@ -909,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 })
 
+#define __invoke_copy_from_kernel(to, from, n)				\
+	__invoke_copy_from_user(to, from, n)
+
+/* For userland <-> userland operations */
+#define ___invoke_copy_in_user(to, from, n)				\
+	__invoke_copy_from_user(to, from, n)
+
+/* For kernel <-> kernel operations */
+#define ___invoke_copy_in_kernel(to, from, n)				\
+	__invoke_copy_from_user(to, from, n)
+
 #define __invoke_copy_from_user_inatomic(to, from, n)			\
 ({									\
 	register void *__cu_to_r __asm__("$4");				\
@@ -932,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 })
 
+#define __invoke_copy_from_kernel_inatomic(to, from, n)		\
+	__invoke_copy_from_user_inatomic(to, from, n)			\
+
+#else
+
+/* EVA specific functions */
+
+extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
+				       size_t __n);
+extern size_t __copy_from_user_eva(void *__to, const void *__from,
+				   size_t __n);
+extern size_t __copy_to_user_eva(void *__to, const void *__from,
+				 size_t __n);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
+({									\
+	register void *__cu_to_r __asm__("$4");			\
+	register const void __user *__cu_from_r __asm__("$5");		\
+	register long __cu_len_r __asm__("$6");			\
+									\
+	__cu_to_r = (to);						\
+	__cu_from_r = (from);						\
+	__cu_len_r = (n);						\
+	__asm__ __volatile__(						\
+	".set\tnoreorder\n\t"						\
+	__MODULE_JAL(func_ptr)						\
+	".set\tnoat\n\t"						\
+	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	".set\tat\n\t"							\
+	".set\treorder"						\
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
+	:								\
+	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
+	  DADDI_SCRATCH, "memory");					\
+	__cu_len_r;							\
+})
+
+#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
+({									\
+	register void *__cu_to_r __asm__("$4");			\
+	register const void __user *__cu_from_r __asm__("$5");		\
+	register long __cu_len_r __asm__("$6");			\
+									\
+	__cu_to_r = (to);						\
+	__cu_from_r = (from);						\
+	__cu_len_r = (n);						\
+	__asm__ __volatile__(						\
+	__MODULE_JAL(func_ptr)						\
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
+	:								\
+	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
+	  DADDI_SCRATCH, "memory");					\
+	__cu_len_r;							\
+})
+
+/*
+ * Source or destination address is in userland. We need to go through
+ * the TLB
+ */
+#define __invoke_copy_from_user(to, from, n)				\
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
+
+#define __invoke_copy_from_user_inatomic(to, from, n)			\
+	__invoke_copy_from_user_eva_generic(to, from, n,		\
+					    __copy_user_inatomic_eva)
+
+#define __invoke_copy_to_user(to, from, n)				\
+	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+
+#define ___invoke_copy_in_user(to, from, n)				\
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
+
+/*
+ * Source or destination address in the kernel. We are not going through
+ * the TLB
+ */
+#define __invoke_copy_from_kernel(to, from, n)				\
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#define __invoke_copy_from_kernel_inatomic(to, from, n)		\
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
+
+#define __invoke_copy_to_kernel(to, from, n)				\
+	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
+
+#define ___invoke_copy_in_kernel(to, from, n)				\
+	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+
+#endif /* CONFIG_EVA */
+
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:	  Destination address, in kernel space.
@@ -989,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\
-		might_fault();						\
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
-						   __cu_len);		\
+	if (segment_eq(get_fs(), get_ds())) {				\
+		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
+						     __cu_from,		\
+						     __cu_len);		\
+	} else {							\
+		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
+			might_fault();					\
+			__cu_len = __invoke_copy_from_user(__cu_to,	\
+							   __cu_from,	\
+							   __cu_len);	\
+		}							\
 	}								\
 	__cu_len;							\
 })
@@ -1006,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	might_fault();							\
-	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
-					   __cu_len);			\
+	if (segment_eq(get_fs(), get_ds())) {				\
+		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
+						    __cu_len);		\
+	} else {							\
+		might_fault();						\
+		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
+						  __cu_len);		\
+	}								\
 	__cu_len;							\
 })
 
@@ -1021,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
-		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
-		might_fault();						\
-		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
-						   __cu_len);		\
+	if (segment_eq(get_fs(), get_ds())) {				\
+		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
+						    __cu_len);		\
+	} else {							\
+		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
+			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
+			might_fault();					\
+			__cu_len = ___invoke_copy_in_user(__cu_to,	\
+							  __cu_from,	\
+							  __cu_len);	\
+		}							\
 	}								\
 	__cu_len;							\
 })
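
A closing note on the invoke macros (sketch only; INVOKE_COPY is a
hypothetical name): the operands are pinned to the MIPS argument registers
$4-$6 (a0-a2), so the handwritten routine from memcpy.S named by func
receives them with no C calling-convention glue, and the clobber list
declares every register that routine may scratch, plus "memory" so the
compiler does not cache user data across the call.

/*
 * Hypothetical condensed form of __invoke_copy_to_user_eva_generic()
 * above. func follows a custom convention: to/from/len arrive in
 * $4/$5/$6, and the number of bytes left uncopied comes back in $6
 * (0 on success).
 */
#define INVOKE_COPY(to, from, n, func)					\
({									\
	register void *__to_r __asm__("$4") = (to);			\
	register const void *__from_r __asm__("$5") = (from);		\
	register long __len_r __asm__("$6") = (n);			\
	__asm__ __volatile__(						\
	__MODULE_JAL(func)	/* jal that also reaches module code */	\
	: "+r" (__to_r), "+r" (__from_r), "+r" (__len_r)		\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15",		\
	  "$24", "$31", DADDI_SCRATCH, "memory");			\
	__len_r;	/* residue: 0 means everything was copied */	\
})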