diff options
| author | Helge Deller <deller@gmx.de> | 2016-04-09 02:26:14 -0400 |
|---|---|---|
| committer | Helge Deller <deller@gmx.de> | 2016-05-22 15:38:56 -0400 |
| commit | 06bff6b9d7d50566a9320f64b0624957798cb08b (patch) | |
| tree | 62aae1924b900d7ef2415f1cf8e4c8a270c11fc9 /arch/parisc/include | |
| parent | fc79168a7c75423047d60a033dc4844955ccae0b (diff) | |
parisc: Simplify and speed up get_user() and put_user()
This patch simplifies the code for get_user() and put_user() a lot.
Instead of accessing kernel memory (%sr0) and userspace memory (%sr3)
hard-coded in the assembler instruction, we now preload %sr2 with either
%sr0 (for accessing KERNEL_DS) or with %sr3 (to access USER_DS) and
use %sr2 in the load directly.
The generated code avoids a branch and speeds up execution by generating
fewer assembler instructions.
Signed-off-by: Helge Deller <deller@gmx.de>
Tested-by: Rolf Eike Beer <eike-kernel@sf-tec.de>
Diffstat (limited to 'arch/parisc/include')
| -rw-r--r-- | arch/parisc/include/asm/uaccess.h | 79 |
1 files changed, 21 insertions, 58 deletions
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 7955e43f3f3f..56b7208da9a4 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h | |||
| @@ -40,14 +40,10 @@ static inline long access_ok(int type, const void __user * addr, | |||
| 40 | #define get_user __get_user | 40 | #define get_user __get_user |
| 41 | 41 | ||
| 42 | #if !defined(CONFIG_64BIT) | 42 | #if !defined(CONFIG_64BIT) |
| 43 | #define LDD_KERNEL(ptr) BUILD_BUG() | ||
| 44 | #define LDD_USER(ptr) BUILD_BUG() | 43 | #define LDD_USER(ptr) BUILD_BUG() |
| 45 | #define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr) | ||
| 46 | #define STD_USER(x, ptr) __put_user_asm64(x, ptr) | 44 | #define STD_USER(x, ptr) __put_user_asm64(x, ptr) |
| 47 | #else | 45 | #else |
| 48 | #define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr) | ||
| 49 | #define LDD_USER(ptr) __get_user_asm("ldd", ptr) | 46 | #define LDD_USER(ptr) __get_user_asm("ldd", ptr) |
| 50 | #define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr) | ||
| 51 | #define STD_USER(x, ptr) __put_user_asm("std", x, ptr) | 47 | #define STD_USER(x, ptr) __put_user_asm("std", x, ptr) |
| 52 | #endif | 48 | #endif |
| 53 | 49 | ||
| @@ -80,43 +76,39 @@ struct exception_data { | |||
| 80 | unsigned long fault_addr; | 76 | unsigned long fault_addr; |
| 81 | }; | 77 | }; |
| 82 | 78 | ||
| 79 | /* | ||
| 80 | * load_sr2() preloads the space register %%sr2 - based on the value of | ||
| 81 | * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which | ||
| 82 | * is 0), or with the current value of %%sr3 to access user space (USER_DS) | ||
| 83 | * memory. The following __get_user_asm() and __put_user_asm() functions have | ||
| 84 | * %%sr2 hard-coded to access the requested memory. | ||
| 85 | */ | ||
| 86 | #define load_sr2() \ | ||
| 87 | __asm__(" or,= %0,%%r0,%%r0\n\t" \ | ||
| 88 | " mfsp %%sr3,%0\n\t" \ | ||
| 89 | " mtsp %0,%%sr2\n\t" \ | ||
| 90 | : : "r"(get_fs()) : ) | ||
| 91 | |||
| 83 | #define __get_user(x, ptr) \ | 92 | #define __get_user(x, ptr) \ |
| 84 | ({ \ | 93 | ({ \ |
| 85 | register long __gu_err __asm__ ("r8") = 0; \ | 94 | register long __gu_err __asm__ ("r8") = 0; \ |
| 86 | register long __gu_val __asm__ ("r9") = 0; \ | 95 | register long __gu_val __asm__ ("r9") = 0; \ |
| 87 | \ | 96 | \ |
| 88 | if (segment_eq(get_fs(), KERNEL_DS)) { \ | 97 | load_sr2(); \ |
| 89 | switch (sizeof(*(ptr))) { \ | 98 | switch (sizeof(*(ptr))) { \ |
| 90 | case 1: __get_kernel_asm("ldb", ptr); break; \ | ||
| 91 | case 2: __get_kernel_asm("ldh", ptr); break; \ | ||
| 92 | case 4: __get_kernel_asm("ldw", ptr); break; \ | ||
| 93 | case 8: LDD_KERNEL(ptr); break; \ | ||
| 94 | default: BUILD_BUG(); break; \ | ||
| 95 | } \ | ||
| 96 | } \ | ||
| 97 | else { \ | ||
| 98 | switch (sizeof(*(ptr))) { \ | ||
| 99 | case 1: __get_user_asm("ldb", ptr); break; \ | 99 | case 1: __get_user_asm("ldb", ptr); break; \ |
| 100 | case 2: __get_user_asm("ldh", ptr); break; \ | 100 | case 2: __get_user_asm("ldh", ptr); break; \ |
| 101 | case 4: __get_user_asm("ldw", ptr); break; \ | 101 | case 4: __get_user_asm("ldw", ptr); break; \ |
| 102 | case 8: LDD_USER(ptr); break; \ | 102 | case 8: LDD_USER(ptr); break; \ |
| 103 | default: BUILD_BUG(); break; \ | 103 | default: BUILD_BUG(); break; \ |
| 104 | } \ | ||
| 105 | } \ | 104 | } \ |
| 106 | \ | 105 | \ |
| 107 | (x) = (__force __typeof__(*(ptr))) __gu_val; \ | 106 | (x) = (__force __typeof__(*(ptr))) __gu_val; \ |
| 108 | __gu_err; \ | 107 | __gu_err; \ |
| 109 | }) | 108 | }) |
| 110 | 109 | ||
| 111 | #define __get_kernel_asm(ldx, ptr) \ | ||
| 112 | __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \ | ||
| 113 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ | ||
| 114 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
| 115 | : "r"(ptr), "1"(__gu_err) \ | ||
| 116 | : "r1"); | ||
| 117 | |||
| 118 | #define __get_user_asm(ldx, ptr) \ | 110 | #define __get_user_asm(ldx, ptr) \ |
| 119 | __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \ | 111 | __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ |
| 120 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ | 112 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ |
| 121 | : "=r"(__gu_val), "=r"(__gu_err) \ | 113 | : "=r"(__gu_val), "=r"(__gu_err) \ |
| 122 | : "r"(ptr), "1"(__gu_err) \ | 114 | : "r"(ptr), "1"(__gu_err) \ |
| @@ -127,23 +119,13 @@ struct exception_data { | |||
| 127 | register long __pu_err __asm__ ("r8") = 0; \ | 119 | register long __pu_err __asm__ ("r8") = 0; \ |
| 128 | __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ | 120 | __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ |
| 129 | \ | 121 | \ |
| 130 | if (segment_eq(get_fs(), KERNEL_DS)) { \ | 122 | load_sr2(); \ |
| 131 | switch (sizeof(*(ptr))) { \ | 123 | switch (sizeof(*(ptr))) { \ |
| 132 | case 1: __put_kernel_asm("stb", __x, ptr); break; \ | ||
| 133 | case 2: __put_kernel_asm("sth", __x, ptr); break; \ | ||
| 134 | case 4: __put_kernel_asm("stw", __x, ptr); break; \ | ||
| 135 | case 8: STD_KERNEL(__x, ptr); break; \ | ||
| 136 | default: BUILD_BUG(); break; \ | ||
| 137 | } \ | ||
| 138 | } \ | ||
| 139 | else { \ | ||
| 140 | switch (sizeof(*(ptr))) { \ | ||
| 141 | case 1: __put_user_asm("stb", __x, ptr); break; \ | 124 | case 1: __put_user_asm("stb", __x, ptr); break; \ |
| 142 | case 2: __put_user_asm("sth", __x, ptr); break; \ | 125 | case 2: __put_user_asm("sth", __x, ptr); break; \ |
| 143 | case 4: __put_user_asm("stw", __x, ptr); break; \ | 126 | case 4: __put_user_asm("stw", __x, ptr); break; \ |
| 144 | case 8: STD_USER(__x, ptr); break; \ | 127 | case 8: STD_USER(__x, ptr); break; \ |
| 145 | default: BUILD_BUG(); break; \ | 128 | default: BUILD_BUG(); break; \ |
| 146 | } \ | ||
| 147 | } \ | 129 | } \ |
| 148 | \ | 130 | \ |
| 149 | __pu_err; \ | 131 | __pu_err; \ |
| @@ -159,17 +141,9 @@ struct exception_data { | |||
| 159 | * r8/r9 are already listed as err/val. | 141 | * r8/r9 are already listed as err/val. |
| 160 | */ | 142 | */ |
| 161 | 143 | ||
| 162 | #define __put_kernel_asm(stx, x, ptr) \ | ||
| 163 | __asm__ __volatile__ ( \ | ||
| 164 | "\n1:\t" stx "\t%2,0(%1)\n\t" \ | ||
| 165 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ | ||
| 166 | : "=r"(__pu_err) \ | ||
| 167 | : "r"(ptr), "r"(x), "0"(__pu_err) \ | ||
| 168 | : "r1") | ||
| 169 | |||
| 170 | #define __put_user_asm(stx, x, ptr) \ | 144 | #define __put_user_asm(stx, x, ptr) \ |
| 171 | __asm__ __volatile__ ( \ | 145 | __asm__ __volatile__ ( \ |
| 172 | "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \ | 146 | "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ |
| 173 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ | 147 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ |
| 174 | : "=r"(__pu_err) \ | 148 | : "=r"(__pu_err) \ |
| 175 | : "r"(ptr), "r"(x), "0"(__pu_err) \ | 149 | : "r"(ptr), "r"(x), "0"(__pu_err) \ |
| @@ -178,21 +152,10 @@ struct exception_data { | |||
| 178 | 152 | ||
| 179 | #if !defined(CONFIG_64BIT) | 153 | #if !defined(CONFIG_64BIT) |
| 180 | 154 | ||
| 181 | #define __put_kernel_asm64(__val, ptr) do { \ | ||
| 182 | __asm__ __volatile__ ( \ | ||
| 183 | "\n1:\tstw %2,0(%1)" \ | ||
| 184 | "\n2:\tstw %R2,4(%1)\n\t" \ | ||
| 185 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ | ||
| 186 | ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ | ||
| 187 | : "=r"(__pu_err) \ | ||
| 188 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ | ||
| 189 | : "r1"); \ | ||
| 190 | } while (0) | ||
| 191 | |||
| 192 | #define __put_user_asm64(__val, ptr) do { \ | 155 | #define __put_user_asm64(__val, ptr) do { \ |
| 193 | __asm__ __volatile__ ( \ | 156 | __asm__ __volatile__ ( \ |
| 194 | "\n1:\tstw %2,0(%%sr3,%1)" \ | 157 | "\n1:\tstw %2,0(%%sr2,%1)" \ |
| 195 | "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ | 158 | "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ |
| 196 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ | 159 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ |
| 197 | ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ | 160 | ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ |
| 198 | : "=r"(__pu_err) \ | 161 | : "=r"(__pu_err) \ |
