Diffstat (limited to 'arch/xtensa/kernel/entry.S')
-rw-r--r--  arch/xtensa/kernel/entry.S  355
1 file changed, 86 insertions, 269 deletions
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 91a689eca43d..dfd35dcc1cb5 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -25,10 +25,10 @@
 #include <asm/page.h>
 #include <asm/signal.h>
 #include <asm/tlbflush.h>
+#include <asm/variant/tie-asm.h>
 
 /* Unimplemented features. */
 
-#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
 #undef KERNEL_STACK_OVERFLOW_CHECK
 #undef PREEMPTIBLE_KERNEL
 #undef ALLOCA_EXCEPTION_IN_IRAM
@@ -214,19 +214,7 @@ _user_exception:
 
 	/* We are back to the original stack pointer (a1) */
 
-2:
-#if XCHAL_EXTRA_SA_SIZE
-
-	/* For user exceptions, save the extra state into the user's TCB.
-	 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
-	 */
-
-	GET_CURRENT(a2,a1)
-	addi	a2, a2, THREAD_CP_SAVE
-	xchal_extra_store_funcbody
-#endif
-
-	/* Now, jump to the common exception handler. */
+2:	/* Now, jump to the common exception handler. */
 
 	j	common_exception
 
@@ -382,6 +370,10 @@ common_exception:
 	s32i	a2, a1, PT_LBEG
 	s32i	a3, a1, PT_LEND
 
+	/* Save optional registers. */
+
+	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
 	/* Go to second-level dispatcher. Set up parameters to pass to the
 	 * exception handler and call the exception handler.
 	 */
@@ -403,74 +395,49 @@ common_exception_return:
 	/* Jump if we are returning from kernel exceptions. */
 
 1:	l32i	a3, a1, PT_PS
-	_bbsi.l	a3, PS_UM_BIT, 2f
-	j	kernel_exception_exit
+	_bbci.l	a3, PS_UM_BIT, 4f
 
 	/* Specific to a user exception exit:
 	 * We need to check some flags for signal handling and rescheduling,
 	 * and have to restore WB and WS, extra states, and all registers
 	 * in the register file that were in use in the user task.
-	 */
-
-2:	wsr	a3, PS		/* disable interrupts */
-
-	/* Check for signals (keep interrupts disabled while we read TI_FLAGS)
-	 * Note: PS.INTLEVEL = 0, PS.EXCM = 1
+	 * Note that we don't disable interrupts here.
 	 */
 
 	GET_THREAD_INFO(a2,a1)
 	l32i	a4, a2, TI_FLAGS
 
-	/* Enable interrupts again.
-	 * Note: When we get here, we certainly have handled any interrupts.
-	 * (Hint: There is only one user exception frame on stack)
-	 */
-
-	movi	a3, 1 << PS_WOE_BIT
-
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
 	_bbci.l	a4, TIF_SIGPENDING, 4f
 
-#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
 	l32i	a4, a1, PT_DEPC
 	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
-#endif
 
-	/* Reenable interrupts and call do_signal() */
+	/* Call do_signal() */
 
-	wsr	a3, PS
 	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
 	mov	a6, a1
 	movi	a7, 0
 	callx4	a4
 	j	1b
 
-3:	/* Reenable interrupts and reschedule */
+3:	/* Reschedule */
 
-	wsr	a3, PS
 	movi	a4, schedule	# void schedule (void)
 	callx4	a4
 	j	1b
 
-	/* Restore the state of the task and return from the exception. */
-
-4:	/* a2 holds GET_CURRENT(a2,a1) */
-
-#if XCHAL_EXTRA_SA_SIZE
+4:	/* Restore optional registers. */
 
-	/* For user exceptions, restore the extra state from the user's TCB. */
+	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
 
-	/* Note: a2 still contains GET_CURRENT(a2,a1) */
-	addi	a2, a2, THREAD_CP_SAVE
-	xchal_extra_load_funcbody
+	wsr	a3, PS		/* disable interrupts */
 
-	/* We must assume that xchal_extra_store_funcbody destroys
-	 * registers a2..a15. FIXME, this list can eventually be
-	 * reduced once real register requirements of the macro are
-	 * finalized. */
+	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
 
-#endif /* XCHAL_EXTRA_SA_SIZE */
+user_exception_exit:
 
+	/* Restore the state of the task and return from the exception. */
 
 	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
 
@@ -536,10 +503,6 @@ common_exception_return:
 
 kernel_exception_exit:
 
-	/* Disable interrupts (a3 holds PT_PS) */
-
-	wsr	a3, PS
-
 #ifdef PREEMPTIBLE_KERNEL
 
 #ifdef CONFIG_PREEMPT
@@ -618,6 +581,8 @@ kernel_exception_exit:
 
 common_exception_exit:
 
+	/* Restore address registers. */
+
 	_bbsi.l	a2, 1, 1f
 	l32i	a4, a1, PT_AREG4
 	l32i	a5, a1, PT_AREG5
@@ -1150,7 +1115,6 @@ CATCH
  *   excsave_1:	a3
  *
  * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
- * Note: We don't need to save a2 in depc (return value)
  */
 
 ENTRY(fast_syscall_spill_registers)
@@ -1166,29 +1130,31 @@ ENTRY(fast_syscall_spill_registers)
 
 	rsr	a0, SAR
 	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
-	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
 	s32i	a3, a2, PT_AREG3
+	s32i	a4, a2, PT_AREG4
+	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
 
 	/* The spill routine might clobber a7, a11, and a15. */
 
-	s32i	a7, a2, PT_AREG5
-	s32i	a11, a2, PT_AREG6
-	s32i	a15, a2, PT_AREG7
+	s32i	a7, a2, PT_AREG7
+	s32i	a11, a2, PT_AREG11
+	s32i	a15, a2, PT_AREG15
 
-	call0	_spill_registers	# destroys a3, DEPC, and SAR
+	call0	_spill_registers	# destroys a3, a4, and SAR
 
 	/* Advance PC, restore registers and SAR, and return from exception. */
 
-	l32i	a3, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
 	l32i	a0, a2, PT_AREG0
 	wsr	a3, SAR
 	l32i	a3, a2, PT_AREG3
 
 	/* Restore clobbered registers. */
 
-	l32i	a7, a2, PT_AREG5
-	l32i	a11, a2, PT_AREG6
-	l32i	a15, a2, PT_AREG7
+	l32i	a7, a2, PT_AREG7
+	l32i	a11, a2, PT_AREG11
+	l32i	a15, a2, PT_AREG15
 
 	movi	a2, 0
 	rfe
@@ -1247,16 +1213,6 @@ fast_syscall_spill_registers_fixup:
 	 * Note: This frame might be the same as above.
 	 */
 
-#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
-	/* Restore registers we precautiously saved.
-	 * We have the value of the 'right' a3
-	 */
-
-	l32i	a7, a2, PT_AREG5
-	l32i	a11, a2, PT_AREG6
-	l32i	a15, a2, PT_AREG7
-#endif
-
 	/* Setup stack pointer. */
 
 	addi	a2, a2, -PT_USER_SIZE
@@ -1271,9 +1227,9 @@ fast_syscall_spill_registers_fixup:
 
 	movi	a3, exc_table
 	rsr	a0, EXCCAUSE
-	addx4	a0, a0, a3			# find entry in table
-	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
-	jx	a0
+	addx4	a0, a0, a3			# find entry in table
+	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
+	jx	a0
 
 fast_syscall_spill_registers_fixup_return:
 
@@ -1290,14 +1246,6 @@ fast_syscall_spill_registers_fixup_return:
 	s32i	a2, a3, EXC_TABLE_PARAM
 	l32i	a2, a3, EXC_TABLE_KSTK
 
-#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
-	/* Save registers again that might be clobbered. */
-
-	s32i	a7, a2, PT_AREG5
-	s32i	a11, a2, PT_AREG6
-	s32i	a15, a2, PT_AREG7
-#endif
-
 	/* Load WB at the time the exception occurred. */
 
 	rsr	a3, SAR			# WB is still in SAR
@@ -1319,7 +1267,7 @@ fast_syscall_spill_registers_fixup_return:
  * This is not a real function. The following conditions must be met:
  *
  *  - must be called with call0.
- *  - uses DEPC, a3 and SAR.
+ *  - uses a3, a4 and SAR.
  *  - the last 'valid' register of each frame are clobbered.
  *  - the caller must have registered a fixup handler
  *    (or be inside a critical section)
@@ -1331,41 +1279,39 @@ ENTRY(_spill_registers)
 	/*
 	 * Rotate ws so that the current windowbase is at bit 0.
 	 * Assume ws = xxxwww1yy (www1 current window frame).
-	 * Rotate ws right so that a2 = yyxxxwww1.
+	 * Rotate ws right so that a4 = yyxxxwww1.
 	 */
 
-	wsr	a2, DEPC		# preserve a2
-	rsr	a2, WINDOWBASE
-	rsr	a3, WINDOWSTART
-	ssr	a2			# holds WB
-	slli	a2, a3, WSBITS
-	or	a3, a3, a2		# a2 = xxxwww1yyxxxwww1yy
-	srl	a3, a3
+	rsr	a4, WINDOWBASE
+	rsr	a3, WINDOWSTART		# a3 = xxxwww1yy
+	ssr	a4			# holds WB
+	slli	a4, a3, WSBITS
+	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
+	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1
 
 	/* We are done if there are no more than the current register frame. */
 
-	extui	a3, a3, 1, WSBITS-2	# a3 = 0yyxxxwww
-	movi	a2, (1 << (WSBITS-1))
+	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
+	movi	a4, (1 << (WSBITS-1))
 	_beqz	a3, .Lnospill		# only one active frame? jump
 
 	/* We want 1 at the top, so that we return to the current windowbase */
 
-	or	a3, a3, a2		# 1yyxxxwww
+	or	a3, a3, a4		# 1yyxxxwww
 
 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
 
 	wsr	a3, WINDOWSTART		# save shifted windowstart
-	neg	a2, a3
-	and	a3, a2, a3		# first bit set from right: 000010000
+	neg	a4, a3
+	and	a3, a4, a3		# first bit set from right: 000010000
 
-	ffs_ws	a2, a3			# a2: shifts to skip empty frames
+	ffs_ws	a4, a3			# a4: shifts to skip empty frames
 	movi	a3, WSBITS
-	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
-	ssr	a2			# save in SAR for later.
+	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
+	ssr	a4			# save in SAR for later.
 
 	rsr	a3, WINDOWBASE
-	add	a3, a3, a2
-	rsr	a2, DEPC		# restore a2
+	add	a3, a3, a4
 	wsr	a3, WINDOWBASE
 	rsync
 
@@ -1394,6 +1340,9 @@ ENTRY(_spill_registers)
 	l32e	a4, a1, -16
 	j	.Lc12c
 
+.Lnospill:
+	ret
+
 .Lloop: _bbsi.l	a3, 1, .Lc4
 	_bbci.l	a3, 2, .Lc12
 
@@ -1419,9 +1368,7 @@ ENTRY(_spill_registers)
 	movi	a3, 1
 	sll	a3, a3
 	wsr	a3, WINDOWSTART
-
-.Lnospill:
-	jx	a0
+	ret
 
 .Lc4:	s32e	a4, a9, -16
 	s32e	a5, a9, -12
@@ -1830,154 +1777,6 @@ ENTRY(fast_store_prohibited)
 1:	j	_user_exception
 
 
-#if XCHAL_EXTRA_SA_SIZE
-
-#warning fast_coprocessor untested
-
-/*
- * Entry condition:
- *
- *   a0:	trashed, original value saved on stack (PT_AREG0)
- *   a1:	a1
- *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
- *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
- *
- *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
-
-ENTRY(fast_coprocessor_double)
-	wsr	a0, EXCSAVE_1
-	movi	a0, unrecoverable_exception
-	callx0	a0
-
-ENTRY(fast_coprocessor)
-
-	/* Fatal if we are in a double exception. */
-
-	l32i	a0, a2, PT_DEPC
-	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
-
-	/* Save some registers a1, a3, a4, SAR */
-
-	xsr	a3, EXCSAVE_1
-	s32i	a3, a2, PT_AREG3
-	rsr	a3, SAR
-	s32i	a4, a2, PT_AREG4
-	s32i	a1, a2, PT_AREG1
-	s32i	a5, a1, PT_AREG5
-	s32i	a3, a2, PT_SAR
-	mov	a1, a2
-
-	/* Currently, the HAL macros only guarantee saving a0 and a1.
-	 * These can and will be refined in the future, but for now,
-	 * just save the remaining registers of a2...a15.
-	 */
-	s32i	a6, a1, PT_AREG6
-	s32i	a7, a1, PT_AREG7
-	s32i	a8, a1, PT_AREG8
-	s32i	a9, a1, PT_AREG9
-	s32i	a10, a1, PT_AREG10
-	s32i	a11, a1, PT_AREG11
-	s32i	a12, a1, PT_AREG12
-	s32i	a13, a1, PT_AREG13
-	s32i	a14, a1, PT_AREG14
-	s32i	a15, a1, PT_AREG15
-
-	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
-
-	rsr	a0, EXCCAUSE
-	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
-
-	/* Set corresponding CPENABLE bit */
-
-	movi	a4, 1
-	ssl	a3			# SAR: 32 - coprocessor_number
-	rsr	a5, CPENABLE
-	sll	a4, a4
-	or	a4, a5, a4
-	wsr	a4, CPENABLE
-	rsync
-	movi	a5, coprocessor_info	# list of owner and offset into cp_save
-	addx8	a0, a4, a5		# entry for CP
-
-	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use
-
-	/* Now compare the current task with the owner of the coprocessor.
-	 * If they are the same, there is no reason to save or restore any
-	 * coprocessor state. Having already enabled the coprocessor,
-	 * branch ahead to return.
-	 */
-	GET_CURRENT(a5,a1)
-	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
-	beq	a4, a5, .Ldone
-
-	/* Find location to dump current coprocessor state:
-	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
-	 *
-	 * Note: a0 pointer to the entry in the coprocessor owner table,
-	 *	 a3 coprocessor number,
-	 *	 a4 current owner of coprocessor.
-	 */
-	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
-	addi	a2, a4, THREAD_CP_SAVE
-	add	a2, a2, a5
-
-	/* Store current coprocessor states. (a5 still has CP number) */
-
-	xchal_cpi_store_funcbody
-
-	/* The macro might have destroyed a3 (coprocessor number), but
-	 * SAR still has 32 - coprocessor_number!
-	 */
-	movi	a3, 32
-	rsr	a4, SAR
-	sub	a3, a3, a4
-
-.Lload:	/* A new task now owns the corpocessors. Save its TCB pointer into
-	 * the coprocessor owner table.
-	 *
-	 * Note: a0 pointer to the entry in the coprocessor owner table,
-	 *	 a3 coprocessor number.
-	 */
-	GET_CURRENT(a4,a1)
-	s32i	a4, a0, 0
-
-	/* Find location from where to restore the current coprocessor state.*/
-
-	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
-	addi	a2, a4, THREAD_CP_SAVE
-	add	a2, a2, a4
-
-	xchal_cpi_load_funcbody
-
-	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
-	 * registers a2..a15.
-	 */
-
-.Ldone:	l32i	a15, a1, PT_AREG15
-	l32i	a14, a1, PT_AREG14
-	l32i	a13, a1, PT_AREG13
-	l32i	a12, a1, PT_AREG12
-	l32i	a11, a1, PT_AREG11
-	l32i	a10, a1, PT_AREG10
-	l32i	a9, a1, PT_AREG9
-	l32i	a8, a1, PT_AREG8
-	l32i	a7, a1, PT_AREG7
-	l32i	a6, a1, PT_AREG6
-	l32i	a5, a1, PT_AREG5
-	l32i	a4, a1, PT_AREG4
-	l32i	a3, a1, PT_AREG3
-	l32i	a2, a1, PT_AREG2
-	l32i	a0, a1, PT_AREG0
-	l32i	a1, a1, PT_AREG1
-
-	rfe
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
-
 /*
  * System Calls.
  *
@@ -2086,20 +1885,36 @@ ENTRY(_switch_to)
 
 	entry	a1, 16
 
-	mov	a4, a3			# preserve a3
+	mov	a12, a2			# preserve 'prev'	(a2)
+	mov	a13, a3			# and 'next'		(a3)
+
+	l32i	a4, a2, TASK_THREAD_INFO
+	l32i	a5, a3, TASK_THREAD_INFO
+
+	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
 
-	s32i	a0, a2, THREAD_RA	# save return address
-	s32i	a1, a2, THREAD_SP	# save stack pointer
+	s32i	a0, a12, THREAD_RA	# save return address
+	s32i	a1, a12, THREAD_SP	# save stack pointer
 
-	/* Disable ints while we manipulate the stack pointer; spill regs. */
+	/* Disable ints while we manipulate the stack pointer. */
 
-	movi	a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
-	xsr	a5, PS
+	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
+	xsr	a14, PS
 	rsr	a3, EXCSAVE_1
 	rsync
 	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
 
-	call0	_spill_registers
+	/* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	l32i	a3, a5, THREAD_CPENABLE
+	xsr	a3, CPENABLE
+	s32i	a3, a4, THREAD_CPENABLE
+#endif
+
+	/* Flush register file. */
+
+	call0	_spill_registers	# destroys a3, a4, and SAR
 
 	/* Set kernel stack (and leave critical section)
 	 * Note: It's save to set it here. The stack will not be overwritten
@@ -2107,19 +1922,21 @@ ENTRY(_switch_to)
 	 * we return from kernel space.
 	 */
 
-	l32i	a0, a4, TASK_THREAD_INFO
 	rsr	a3, EXCSAVE_1		# exc_table
-	movi	a1, 0
-	addi	a0, a0, PT_REGS_OFFSET
-	s32i	a1, a3, EXC_TABLE_FIXUP
-	s32i	a0, a3, EXC_TABLE_KSTK
+	movi	a6, 0
+	addi	a7, a5, PT_REGS_OFFSET
+	s32i	a6, a3, EXC_TABLE_FIXUP
+	s32i	a7, a3, EXC_TABLE_KSTK
 
 	/* restore context of the task that 'next' addresses */
 
-	l32i	a0, a4, THREAD_RA	/* restore return address */
-	l32i	a1, a4, THREAD_SP	/* restore stack pointer */
+	l32i	a0, a13, THREAD_RA	# restore return address
+	l32i	a1, a13, THREAD_SP	# restore stack pointer
+
+	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
 
-	wsr	a5, PS
+	wsr	a14, PS
+	mov	a2, a12			# return 'prev'
 	rsync
 
 	retw