author		Chris Zankel <chris@zankel.net>		2008-02-12 16:17:07 -0500
committer	Chris Zankel <chris@zankel.net>		2008-02-13 20:41:43 -0500
commit		c658eac628aa8df040dfe614556d95e6da3a9ffb (patch)
tree		e2211e1d5c894c29e92d4c744f504b38410efe41 /arch/xtensa/kernel/entry.S
parent		71d28e6c285548106f551fde13ca6d589433d843 (diff)
[XTENSA] Add support for configurable registers and coprocessors
The Xtensa architecture allows custom instructions and registers to be defined. Registers that are bound to a coprocessor are only accessible if the corresponding enable bit is set, which allows a 'lazy' context switch mechanism to be implemented. Other registers need to be saved and restored at context switch time or during interrupt handling.

This patch adds support for these additional states:

- save and restore registers that are used by the compiler upon interrupt entry and exit
- context switch additional registers not bound to any coprocessor
- 'lazy' context switch of registers bound to a coprocessor
- ptrace interface to provide access to the additional registers
- update configuration files in include/asm-xtensa/variant-fsf

Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'arch/xtensa/kernel/entry.S')
-rw-r--r--	arch/xtensa/kernel/entry.S	295
1 file changed, 73 insertions(+), 222 deletions(-)
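For readers unfamiliar with the 'lazy' scheme the log refers to: a thread's coprocessor registers are not saved or restored at context switch time; instead the coprocessor's enable bit is left clear, the first access by a new owner traps, and only then is the previous owner's state spilled and the faulting task's state loaded. The C program below is a purely illustrative sketch of that idea, not the code added by this patch (which is Xtensa assembly); every name in it (cp_owner, cp_regs, lazy_cp_trap, ...) is invented for the example.

/*
 * lazy_cp.c -- illustrative sketch of a "lazy" coprocessor context
 * switch.  NOT the patch's code; all names here are made up.
 */
#include <stdio.h>
#include <string.h>

#define NR_COPROCESSORS	2
#define CP_STATE_SIZE	64

struct task {
	const char *name;
	/* per-task save area for each coprocessor's register file */
	unsigned char cp_state[NR_COPROCESSORS][CP_STATE_SIZE];
};

/* Stand-in for the real coprocessor register files. */
static unsigned char cp_regs[NR_COPROCESSORS][CP_STATE_SIZE];

/* Which task's state currently sits in each coprocessor. */
static struct task *cp_owner[NR_COPROCESSORS];

static void save_cp_state(struct task *t, int cp)
{
	memcpy(t->cp_state[cp], cp_regs[cp], CP_STATE_SIZE);
}

static void load_cp_state(struct task *t, int cp)
{
	memcpy(cp_regs[cp], t->cp_state[cp], CP_STATE_SIZE);
}

/*
 * Models the "coprocessor N disabled" trap: the real handler also sets
 * the enable bit; here we only track ownership of the register file.
 */
static void lazy_cp_trap(struct task *current, int cp)
{
	if (cp_owner[cp] == current)
		return;				/* registers already ours */

	if (cp_owner[cp])
		save_cp_state(cp_owner[cp], cp);	/* spill old owner */

	load_cp_state(current, cp);		/* reload our saved state */
	cp_owner[cp] = current;
}

int main(void)
{
	struct task a = { .name = "A" }, b = { .name = "B" };

	lazy_cp_trap(&a, 0);	/* A faults on CP0, becomes owner */
	lazy_cp_trap(&b, 0);	/* A's state is saved, B's is loaded */
	printf("CP0 now owned by %s\n", cp_owner[0]->name);
	return 0;
}

The cost of the extra trap is paid only by tasks that actually use a coprocessor; tasks that never touch it pay nothing at context switch.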
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index b51ddb0dcf2..24770b6a5e4 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -25,6 +25,7 @@
 #include <asm/page.h>
 #include <asm/signal.h>
 #include <asm/tlbflush.h>
+#include <asm/variant/tie-asm.h>
 
 /* Unimplemented features. */
 
@@ -213,19 +214,7 @@ _user_exception:
 
 	/* We are back to the original stack pointer (a1) */
 
-2:
-#if XCHAL_EXTRA_SA_SIZE
-
-	/* For user exceptions, save the extra state into the user's TCB.
-	 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
-	 */
-
-	GET_CURRENT(a2,a1)
-	addi	a2, a2, THREAD_CP_SAVE
-	xchal_extra_store_funcbody
-#endif
-
-	/* Now, jump to the common exception handler. */
+2:	/* Now, jump to the common exception handler. */
 
 	j	common_exception
 
@@ -381,6 +370,10 @@ common_exception:
 	s32i	a2, a1, PT_LBEG
 	s32i	a3, a1, PT_LEND
 
+	/* Save optional registers. */
+
+	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
 	/* Go to second-level dispatcher. Set up parameters to pass to the
 	 * exception handler and call the exception handler.
 	 */
@@ -452,22 +445,6 @@ common_exception_return:
 
 4:	/* a2 holds GET_CURRENT(a2,a1)  */
 
-#if XCHAL_EXTRA_SA_SIZE
-
-	/* For user exceptions, restore the extra state from the user's TCB. */
-
-	/* Note: a2 still contains GET_CURRENT(a2,a1) */
-	addi	a2, a2, THREAD_CP_SAVE
-	xchal_extra_load_funcbody
-
-	/* We must assume that xchal_extra_store_funcbody destroys
-	 * registers a2..a15. FIXME, this list can eventually be
-	 * reduced once real register requirements of the macro are
-	 * finalized. */
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
-
-
 	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
 
 	l32i	a2, a1, PT_WINDOWBASE
@@ -614,6 +591,12 @@ kernel_exception_exit:
 
 common_exception_exit:
 
+	/* Restore optional registers. */
+
+	load_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
+
+	/* Restore address registers. */
+
 	_bbsi.l	a2, 1, 1f
 	l32i	a4, a1, PT_AREG4
 	l32i	a5, a1, PT_AREG5
@@ -1146,7 +1129,6 @@ CATCH
  * excsave_1:	a3
  *
  * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
- * Note: We don't need to save a2 in depc (return value)
  */
 
 ENTRY(fast_syscall_spill_registers)
@@ -1162,29 +1144,31 @@ ENTRY(fast_syscall_spill_registers)
 
 	rsr	a0, SAR
 	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
-	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
 	s32i	a3, a2, PT_AREG3
+	s32i	a4, a2, PT_AREG4
+	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
 
 	/* The spill routine might clobber a7, a11, and a15. */
 
-	s32i	a7, a2, PT_AREG5
-	s32i	a11, a2, PT_AREG6
-	s32i	a15, a2, PT_AREG7
+	s32i	a7, a2, PT_AREG7
+	s32i	a11, a2, PT_AREG11
+	s32i	a15, a2, PT_AREG15
 
-	call0	_spill_registers	# destroys a3, DEPC, and SAR
+	call0	_spill_registers	# destroys a3, a4, and SAR
 
 	/* Advance PC, restore registers and SAR, and return from exception. */
 
-	l32i	a3, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
 	l32i	a0, a2, PT_AREG0
 	wsr	a3, SAR
 	l32i	a3, a2, PT_AREG3
 
 	/* Restore clobbered registers. */
 
-	l32i	a7, a2, PT_AREG5
-	l32i	a11, a2, PT_AREG6
-	l32i	a15, a2, PT_AREG7
+	l32i	a7, a2, PT_AREG7
+	l32i	a11, a2, PT_AREG11
+	l32i	a15, a2, PT_AREG15
 
 	movi	a2, 0
 	rfe
@@ -1257,9 +1241,9 @@ fast_syscall_spill_registers_fixup:
 
 	movi	a3, exc_table
 	rsr	a0, EXCCAUSE
-	addx4	a0, a0, a3		# find entry in table
-	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
-	jx	a0
+	addx4	a0, a0, a3		# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
+	jx	a0
 
 fast_syscall_spill_registers_fixup_return:
 
@@ -1297,7 +1281,7 @@ fast_syscall_spill_registers_fixup_return:
  * This is not a real function. The following conditions must be met:
  *
  *  - must be called with call0.
- *  - uses DEPC, a3 and SAR.
+ *  - uses a3, a4 and SAR.
  *  - the last 'valid' register of each frame are clobbered.
  *  - the caller must have registered a fixup handler
  *    (or be inside a critical section)
@@ -1309,41 +1293,39 @@ ENTRY(_spill_registers)
 /*
  * Rotate ws so that the current windowbase is at bit 0.
  * Assume ws = xxxwww1yy (www1 current window frame).
- * Rotate ws right so that a2 = yyxxxwww1.
+ * Rotate ws right so that a4 = yyxxxwww1.
  */
 
-	wsr	a2, DEPC		# preserve a2
-	rsr	a2, WINDOWBASE
+	rsr	a4, WINDOWBASE
 	rsr	a3, WINDOWSTART		# a3 = xxxwww1yy
-	ssr	a2			# holds WB
-	slli	a2, a3, WSBITS
-	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
+	ssr	a4			# holds WB
+	slli	a4, a3, WSBITS
+	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
 	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
 
 	/* We are done if there are no more than the current register frame. */
 
 	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
-	movi	a2, (1 << (WSBITS-1))
+	movi	a4, (1 << (WSBITS-1))
 	_beqz	a3, .Lnospill		# only one active frame? jump
 
 	/* We want 1 at the top, so that we return to the current windowbase */
 
-	or	a3, a3, a2		# 1yyxxxwww
+	or	a3, a3, a4		# 1yyxxxwww
 
 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
 
 	wsr	a3, WINDOWSTART		# save shifted windowstart
-	neg	a2, a3
-	and	a3, a2, a3		# first bit set from right: 000010000
+	neg	a4, a3
+	and	a3, a4, a3		# first bit set from right: 000010000
 
-	ffs_ws	a2, a3			# a2: shifts to skip empty frames
+	ffs_ws	a4, a3			# a4: shifts to skip empty frames
 	movi	a3, WSBITS
-	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
-	ssr	a2			# save in SAR for later.
+	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
+	ssr	a4			# save in SAR for later.
 
 	rsr	a3, WINDOWBASE
-	add	a3, a3, a2
-	rsr	a2, DEPC		# restore a2
+	add	a3, a3, a4
 	wsr	a3, WINDOWBASE
 	rsync
 
@@ -1373,7 +1355,6 @@ ENTRY(_spill_registers)
 	j	.Lc12c
 
 .Lnospill:
-	rsr	a2, DEPC
 	ret
 
 .Lloop: _bbsi.l	a3, 1, .Lc4
@@ -1810,154 +1791,6 @@ ENTRY(fast_store_prohibited)
 1:	j	_user_exception
 
 
-#if XCHAL_EXTRA_SA_SIZE
-
-#warning fast_coprocessor untested
-
-/*
- * Entry condition:
- *
- *   a0:	trashed, original value saved on stack (PT_AREG0)
- *   a1:	a1
- *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
- *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
- *
- *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
-
-ENTRY(fast_coprocessor_double)
-	wsr	a0, EXCSAVE_1
-	movi	a0, unrecoverable_exception
-	callx0	a0
-
-ENTRY(fast_coprocessor)
-
-	/* Fatal if we are in a double exception. */
-
-	l32i	a0, a2, PT_DEPC
-	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
-
-	/* Save some registers a1, a3, a4, SAR */
-
-	xsr	a3, EXCSAVE_1
-	s32i	a3, a2, PT_AREG3
-	rsr	a3, SAR
-	s32i	a4, a2, PT_AREG4
-	s32i	a1, a2, PT_AREG1
-	s32i	a5, a1, PT_AREG5
-	s32i	a3, a2, PT_SAR
-	mov	a1, a2
-
-	/* Currently, the HAL macros only guarantee saving a0 and a1.
-	 * These can and will be refined in the future, but for now,
-	 * just save the remaining registers of a2...a15.
-	 */
-	s32i	a6, a1, PT_AREG6
-	s32i	a7, a1, PT_AREG7
-	s32i	a8, a1, PT_AREG8
-	s32i	a9, a1, PT_AREG9
-	s32i	a10, a1, PT_AREG10
-	s32i	a11, a1, PT_AREG11
-	s32i	a12, a1, PT_AREG12
-	s32i	a13, a1, PT_AREG13
-	s32i	a14, a1, PT_AREG14
-	s32i	a15, a1, PT_AREG15
-
-	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
-
-	rsr	a0, EXCCAUSE
-	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
-
-	/* Set corresponding CPENABLE bit */
-
-	movi	a4, 1
-	ssl	a3			# SAR: 32 - coprocessor_number
-	rsr	a5, CPENABLE
-	sll	a4, a4
-	or	a4, a5, a4
-	wsr	a4, CPENABLE
-	rsync
-	movi	a5, coprocessor_info	# list of owner and offset into cp_save
-	addx8	a0, a4, a5		# entry for CP
-
-	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use
-
-	/* Now compare the current task with the owner of the coprocessor.
-	 * If they are the same, there is no reason to save or restore any
-	 * coprocessor state. Having already enabled the coprocessor,
-	 * branch ahead to return.
-	 */
-	GET_CURRENT(a5,a1)
-	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
-	beq	a4, a5, .Ldone
-
-	/* Find location to dump current coprocessor state:
-	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
-	 *
-	 * Note: a0 pointer to the entry in the coprocessor owner table,
-	 *	 a3 coprocessor number,
-	 *	 a4 current owner of coprocessor.
-	 */
-	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
-	addi	a2, a4, THREAD_CP_SAVE
-	add	a2, a2, a5
-
-	/* Store current coprocessor states. (a5 still has CP number) */
-
-	xchal_cpi_store_funcbody
-
-	/* The macro might have destroyed a3 (coprocessor number), but
-	 * SAR still has 32 - coprocessor_number!
-	 */
-	movi	a3, 32
-	rsr	a4, SAR
-	sub	a3, a3, a4
-
-.Lload:	/* A new task now owns the corpocessors. Save its TCB pointer into
-	 * the coprocessor owner table.
-	 *
-	 * Note: a0 pointer to the entry in the coprocessor owner table,
-	 *	 a3 coprocessor number.
-	 */
-	GET_CURRENT(a4,a1)
-	s32i	a4, a0, 0
-
-	/* Find location from where to restore the current coprocessor state.*/
-
-	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
-	addi	a2, a4, THREAD_CP_SAVE
-	add	a2, a2, a4
-
-	xchal_cpi_load_funcbody
-
-	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
-	 * registers a2..a15.
-	 */
-
-.Ldone:	l32i	a15, a1, PT_AREG15
-	l32i	a14, a1, PT_AREG14
-	l32i	a13, a1, PT_AREG13
-	l32i	a12, a1, PT_AREG12
-	l32i	a11, a1, PT_AREG11
-	l32i	a10, a1, PT_AREG10
-	l32i	a9, a1, PT_AREG9
-	l32i	a8, a1, PT_AREG8
-	l32i	a7, a1, PT_AREG7
-	l32i	a6, a1, PT_AREG6
-	l32i	a5, a1, PT_AREG5
-	l32i	a4, a1, PT_AREG4
-	l32i	a3, a1, PT_AREG3
-	l32i	a2, a1, PT_AREG2
-	l32i	a0, a1, PT_AREG0
-	l32i	a1, a1, PT_AREG1
-
-	rfe
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
-
 /*
  * System Calls.
  *
@@ -2066,20 +1899,36 @@ ENTRY(_switch_to)
 
 	entry	a1, 16
 
-	mov	a4, a3			# preserve a3
+	mov	a12, a2			# preserve 'prev' (a2)
+	mov	a13, a3			# and 'next' (a3)
 
-	s32i	a0, a2, THREAD_RA	# save return address
-	s32i	a1, a2, THREAD_SP	# save stack pointer
+	l32i	a4, a2, TASK_THREAD_INFO
+	l32i	a5, a3, TASK_THREAD_INFO
 
-	/* Disable ints while we manipulate the stack pointer; spill regs. */
+	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
 
-	movi	a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
-	xsr	a5, PS
+	s32i	a0, a12, THREAD_RA	# save return address
+	s32i	a1, a12, THREAD_SP	# save stack pointer
+
+	/* Disable ints while we manipulate the stack pointer. */
+
+	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
+	xsr	a14, PS
 	rsr	a3, EXCSAVE_1
 	rsync
 	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
 
-	call0	_spill_registers
+	/* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	l32i	a3, a5, THREAD_CPENABLE
+	xsr	a3, CPENABLE
+	s32i	a3, a4, THREAD_CPENABLE
+#endif
+
+	/* Flush register file. */
+
+	call0	_spill_registers	# destroys a3, a4, and SAR
 
 	/* Set kernel stack (and leave critical section)
 	 * Note: It's save to set it here. The stack will not be overwritten
@@ -2087,19 +1936,21 @@ ENTRY(_switch_to)
 	 *       we return from kernel space.
 	 */
 
-	l32i	a0, a4, TASK_THREAD_INFO
 	rsr	a3, EXCSAVE_1		# exc_table
-	movi	a1, 0
-	addi	a0, a0, PT_REGS_OFFSET
-	s32i	a1, a3, EXC_TABLE_FIXUP
-	s32i	a0, a3, EXC_TABLE_KSTK
+	movi	a6, 0
+	addi	a7, a5, PT_REGS_OFFSET
+	s32i	a6, a3, EXC_TABLE_FIXUP
+	s32i	a7, a3, EXC_TABLE_KSTK
 
 	/* restore context of the task that 'next' addresses */
 
-	l32i	a0, a4, THREAD_RA	/* restore return address */
-	l32i	a1, a4, THREAD_SP	/* restore stack pointer */
+	l32i	a0, a13, THREAD_RA	# restore return address
+	l32i	a1, a13, THREAD_SP	# restore stack pointer
+
+	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
 
-	wsr	a5, PS
+	wsr	a14, PS
+	mov	a2, a12			# return 'prev'
 	rsync
 
 	retw
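As a reading aid for the "Switch CPENABLE" block added to _switch_to above (the l32i/xsr/s32i sequence on THREAD_CPENABLE): it installs the incoming thread's saved CPENABLE mask into the hardware register and stores the outgoing thread's live mask back into its thread_info, so that a later coprocessor access by a thread whose enable bit is clear traps into the lazy handler. A minimal, purely illustrative C rendering follows; it is not kernel code, and the names (hw_cpenable, xsr_cpenable, switch_cpenable) are invented for the sketch.

/* Illustrative model of the CPENABLE swap in _switch_to; names invented. */

struct thread_info {
	unsigned long cpenable;		/* saved CPENABLE for this thread */
};

/* Model of the CPENABLE special register (really accessed with xsr). */
static unsigned long hw_cpenable;

static unsigned long xsr_cpenable(unsigned long new_mask)
{
	unsigned long old = hw_cpenable;	/* like "xsr a3, CPENABLE" */
	hw_cpenable = new_mask;
	return old;
}

static void switch_cpenable(struct thread_info *prev, struct thread_info *next)
{
	/* Install next's enable mask, remember prev's for its next run. */
	prev->cpenable = xsr_cpenable(next->cpenable);
}

On the real hardware the xsr instruction performs this exchange in a single instruction; the C helper only models its read/write-swap semantics.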