author		Paul Burton <paul.burton@imgtec.com>	2015-05-24 11:31:44 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2015-06-21 15:54:20 -0400
commit		12060666f5c0659d28e31cbf6973af1dfa43c0e7 (patch)
tree		4f01ced816504d892db9bbeb9da05a020e1f82df /arch
parent		df115f3ee9ea703e1209392cd08f8d6783244721 (diff)
MIPS: Optimise non-EVA kernel user memory accesses
Commits ac1d8590d3ae (MIPS: asm: uaccess: Use EVA instructions wrappers),
05c6516005c4 (MIPS: asm: uaccess: Add EVA support to copy_{in, to,from}_user)
& e3a9b07a9caf (MIPS: asm: uaccess: Add EVA support for str*_user operations)
added checks to various user memory access functions & macros in order to
determine whether to perform standard memory accesses or their EVA userspace
equivalents. In kernels built without support for EVA these checks are
entirely redundant. Avoid emitting them & allow the compiler to optimise out
the EVA userspace code in such kernels by checking config_enabled(CONFIG_EVA).

This reduces the size of a malta_defconfig kernel built using GCC 4.9.2 by
approximately 33KB (from 5995072 to 5962304 bytes).

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/10165/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/include/asm/uaccess.h	47
1 file changed, 31 insertions(+), 16 deletions(-)
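
The size win relies on config_enabled(CONFIG_EVA) expanding to the
compile-time constant 0 in a non-EVA build, so that eva_kernel_access()
folds to a constant false and the compiler can discard every EVA branch
as dead code. A minimal userspace sketch of the same pattern (the
EVA_CONFIGURED macro and helper names below are illustrative stand-ins,
not kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for config_enabled(CONFIG_EVA): a Kconfig
	 * predicate the preprocessor reduces to the constant 0 or 1. */
	#define EVA_CONFIGURED 0

	static bool eva_kernel_access(bool segment_is_kernel)
	{
		/* With EVA_CONFIGURED == 0 this folds to "return false",
		 * so every branch guarded by it becomes dead code. */
		if (!EVA_CONFIGURED)
			return false;

		return segment_is_kernel;
	}

	int main(void)
	{
		/* In a non-EVA build the compiler emits only the else arm;
		 * the EVA path vanishes from the object code entirely. */
		if (eva_kernel_access(true))
			puts("EVA kernel-segment access path");
		else
			puts("ordinary user access path");
		return 0;
	}
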
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bf8b32450ef6..6ed061dfa3ee 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -78,6 +78,21 @@ extern u64 __ua_limit;
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
+/*
+ * eva_kernel_access() - determine whether kernel memory access on an EVA system
+ *
+ * Determines whether memory accesses should be performed to kernel memory
+ * on a system using Extended Virtual Addressing (EVA).
+ *
+ * Return: true if a kernel memory access on an EVA system, else false.
+ */
+static inline bool eva_kernel_access(void)
+{
+	if (!config_enabled(CONFIG_EVA))
+		return false;
+
+	return segment_eq(get_fs(), get_ds());
+}
+
 /*
  * Is a address valid? This does a straighforward calculation rather
@@ -281,7 +296,7 @@ do { \
 ({									\
 	int __gu_err;							\
 									\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__get_kernel_common((x), size, ptr);			\
 	} else {							\
 		__chk_user_ptr(ptr);					\
@@ -297,7 +312,7 @@ do { \
 									\
 	might_fault();							\
 	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {		\
-		if (segment_eq(get_fs(), get_ds()))			\
+		if (eva_kernel_access())				\
 			__get_kernel_common((x), size, __gu_ptr);	\
 		else							\
 			__get_user_common((x), size, __gu_ptr);		\
@@ -422,7 +437,7 @@ do { \
 	int __pu_err = 0;						\
 									\
 	__pu_val = (x);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__put_kernel_common(ptr, size);				\
 	} else {							\
 		__chk_user_ptr(ptr);					\
@@ -439,7 +454,7 @@ do { \
 									\
 	might_fault();							\
 	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {	\
-		if (segment_eq(get_fs(), get_ds()))			\
+		if (eva_kernel_access())				\
 			__put_kernel_common(__pu_addr, size);		\
 		else							\
 			__put_user_common(__pu_addr, size);		\
@@ -833,7 +848,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_from = (from);						\
 	__cu_len = (n);							\
 	might_fault();							\
-	if (segment_eq(get_fs(), get_ds()))				\
+	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
 						   __cu_len);		\
 	else								\
@@ -853,7 +868,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds()))				\
+	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
 						   __cu_len);		\
 	else								\
@@ -871,7 +886,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds()))				\
+	if (eva_kernel_access())					\
 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
 							      __cu_from,\
 							      __cu_len);\
@@ -904,7 +919,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
 						   __cu_from,		\
 						   __cu_len);		\
@@ -1126,7 +1141,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
 						     __cu_from,		\
 						     __cu_len);		\
@@ -1150,7 +1165,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
 						    __cu_len);		\
 	} else {							\
@@ -1170,7 +1185,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	__cu_to = (to);							\
 	__cu_from = (from);						\
 	__cu_len = (n);							\
-	if (segment_eq(get_fs(), get_ds())) {				\
+	if (eva_kernel_access()) {					\
 		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
 						    __cu_len);		\
 	} else {							\
@@ -1250,7 +1265,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
@@ -1299,7 +1314,7 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
@@ -1343,7 +1358,7 @@ static inline long strlen_user(const char __user *s)
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			__MODULE_JAL(__strlen_kernel_asm)
@@ -1370,7 +1385,7 @@ static inline long __strnlen_user(const char __user *s, long n)
 {
 	long res;
 
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
@@ -1411,7 +1426,7 @@ static inline long strnlen_user(const char __user *s, long n)
 	long res;
 
 	might_fault();
-	if (segment_eq(get_fs(), get_ds())) {
+	if (eva_kernel_access()) {
 		__asm__ __volatile__(
 			"move\t$4, %1\n\t"
 			"move\t$5, %2\n\t"
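
For context, every call site converted above reduces to the same dispatch
shape, and with CONFIG_EVA=n only the user arm survives in the generated
code. The following is a simplified, compilable sketch, not kernel code:
copy_kernel()/copy_user() are hypothetical stand-ins for
__invoke_copy_to_kernel() and the EVA user-copy helpers, and memcpy stands
in for the real access routines.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	#define EVA_CONFIGURED 0	/* illustrative: models CONFIG_EVA=n */

	/* Hypothetical stand-ins for the kernel/EVA copy helpers; both
	 * return the number of bytes left uncopied, as the kernel does. */
	static size_t copy_kernel(void *to, const void *from, size_t n)
	{
		memcpy(to, from, n);
		return 0;
	}

	static size_t copy_user(void *to, const void *from, size_t n)
	{
		memcpy(to, from, n);
		return 0;
	}

	static bool eva_kernel_access(void)
	{
		if (!EVA_CONFIGURED)
			return false;	/* non-EVA build: constant false */
		return true;	/* real kernel: segment_eq(get_fs(), get_ds()) */
	}

	/* The dispatch shape shared by every call site this patch converts. */
	static size_t copy_dispatch(void *to, const void *from, size_t n)
	{
		if (eva_kernel_access())
			return copy_kernel(to, from, n);
		return copy_user(to, from, n);
	}

	int main(void)
	{
		char dst[8];
		size_t left = copy_dispatch(dst, "hello", 6);
		printf("copied \"%s\", %zu bytes left\n", dst, left);
		return 0;
	}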