author    Cyrill Gorcunov <gorcunov@gmail.com>    2008-11-27 13:10:08 -0500
committer Ingo Molnar <mingo@elte.hu>             2008-11-28 08:53:48 -0500
commit    9f1e87ea3ecb3c46c21f6a1a202ec82f99ed2473 (patch)
tree      1ad14e1d9c1da38ccee3487c4c945327499dc412 /arch/x86
parent    5ae3a139cf4fc2349f1dfa1993a66c1dcc119468 (diff)
x86: entry_64.S - trivial: space, comments fixup
Impact: cleanup
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/entry_64.S | 94
1 file changed, 48 insertions(+), 46 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 57d7f7a5ad2f..08c0c9777a09 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1020,7 +1020,7 @@ END(\sym)
 
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $-1               /* ORIG_RAX: no syscall to restart */
        CFI_ADJUST_CFA_OFFSET 8
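The hunk above is mostly CFI bookkeeping: throughout this file, every pushq that grows the stack is paired with CFI_ADJUST_CFA_OFFSET 8, and every pop with -8, so that DWARF unwinders keep an accurate frame. A minimal user-space sketch of the same pairing, written against the raw .cfi_adjust_cfa_offset directive that the kernel's CFI_ADJUST_CFA_OFFSET macro expands to (the function name save_two is made up for illustration):

        .text
        .globl  save_two
save_two:
        .cfi_startproc
        pushq   %rbx                    /* stack grows by 8 bytes... */
        .cfi_adjust_cfa_offset 8        /* ...so tell the unwinder */
        pushq   %r12
        .cfi_adjust_cfa_offset 8
        /* function body would go here */
        popq    %r12
        .cfi_adjust_cfa_offset -8
        popq    %rbx
        .cfi_adjust_cfa_offset -8
        ret
        .cfi_endproc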
@@ -1088,36 +1088,36 @@ zeroentry coprocessor_error do_coprocessor_error
 errorentry alignment_check do_alignment_check
 zeroentry simd_coprocessor_error do_simd_coprocessor_error
 
 /* Reload gs selector with exception handling */
 /* edi: new selector */
 ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
 gs_change:
        movl %edi,%gs
 2:     mfence          /* workaround */
        SWAPGS
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
 END(native_load_gs_index)
 
        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
 bad_gs:
        SWAPGS          /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous
 
 /*
  * Create a kernel thread.
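native_load_gs_index above is the canonical exception-table idiom, which the whitespace fixes leave intact: the mov into %gs at gs_change can fault on a bad selector, the (faulting address, fixup address) pair recorded in __ex_table routes the fault to bad_gs, and the fixup falls back to a null gs before resuming at label 2. A minimal sketch of the same idiom under hypothetical labels (load_sel, bad_sel); the table only has an effect when the enclosing kernel's fault handler actually searches __ex_table:

        .text
        .globl  load_sel
load_sel:
sel_change:
        movl    %edi,%gs                /* may #GP on a bad selector */
        xorl    %eax,%eax               /* 0: load succeeded */
        ret
        .section __ex_table,"a"
        .align  8
        .quad   sel_change,bad_sel      /* fault at sel_change -> run bad_sel */
        .previous
        .section .fixup,"ax"
bad_sel:
        movl    $1,%eax                 /* 1: the load faulted */
        ret
        .previous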
@@ -1152,7 +1152,7 @@ ENTRY(kernel_thread)
  * so internally to the x86_64 port you can rely on kernel_thread()
  * not to reschedule the child before returning, this avoids the need
  * of hacks for example to fork off the per-CPU idle tasks.
  * [Hopefully no generic code relies on the reschedule -AK]
  */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
@@ -1231,22 +1231,24 @@ END(call_softirq)
 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
 
 /*
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
+ * A note on the "critical region" in our callback handler.
+ * We want to avoid stacking callback handlers due to events occurring
+ * during handling of the last event. To do this, we keep events disabled
+ * until we've done all processing. HOWEVER, we must enable events before
+ * popping the stack frame (can't be done atomically) and so it would still
+ * be possible to get enough handler activations to overflow the stack.
+ * Although unlikely, bugs of that kind are hard to track down, so we'd
+ * like to avoid the possibility.
+ * So, on entry to the handler we detect whether we interrupted an
+ * existing activation in its critical region -- if so, we pop the current
+ * activation and restart the handler using the previous one.
  */
 ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        CFI_STARTPROC
-/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
-   see the correct pointer to the pt_regs */
+       /*
+        * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
+        * see the correct pointer to the pt_regs
+        */
        movq %rdi, %rsp            # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
@@ -1264,18 +1266,18 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 END(do_hypervisor_callback)
 
 /*
-# Hypervisor uses this for application faults while it executes.
-# We get here for two reasons:
-#  1. Fault while reloading DS, ES, FS or GS
-#  2. Fault while executing IRET
-# Category 1 we do not need to fix up as Xen has already reloaded all segment
-# registers that could be reloaded and zeroed the others.
-# Category 2 we fix up by killing the current process. We cannot use the
-# normal Linux return path in this case because if we use the IRET hypercall
-# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-# We distinguish between categories by comparing each saved segment register
-# with its current contents: any discrepancy means we in category 1.
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we do not need to fix up as Xen has already reloaded all segment
+ * registers that could be reloaded and zeroed the others.
+ * Category 2 we fix up by killing the current process. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by comparing each saved segment register
+ * with its current contents: any discrepancy means we in category 1.
  */
 ENTRY(xen_failsafe_callback)
        INTR_FRAME 1 (6*8)
        /*CFI_REL_OFFSET gs,GS*/
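The category test the comment describes is mechanical: compare each segment selector that Xen saved on the failsafe frame with the live register, and any discrepancy means Xen already reloaded or zeroed it (category 1). A sketch of that comparison, assuming the saved-selector offsets of this frame layout (the real handler continues the sequence for FS and GS):

        movw    %ds,%cx
        cmpw    %cx,0x10(%rsp)          /* saved DS vs. current DS */
        jne     1f                      /* mismatch: category 1 */
        movw    %es,%cx
        cmpw    %cx,0x18(%rsp)          /* saved ES vs. current ES */
        jne     1f
        /* everything matches: category 2, the IRET itself faulted */
1: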
@@ -1339,8 +1341,8 @@ paranoidzeroentry machine_check do_machine_check
 #endif
 
 /*
  * "Paranoid" exit path from exception stack.
  * Paranoid because this is used by NMIs and cannot take
  * any kernel state for granted.
  * We don't do kernel preemption checks here, because only
  * NMI should be common and it does not enable IRQs and
@@ -1445,7 +1447,7 @@ error_kernelspace:
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
        jmp error_sti
 END(error_entry)
 
@@ -1521,7 +1523,7 @@ nmi_schedule:
        CFI_ENDPROC
 #else
        jmp paranoid_exit
        CFI_ENDPROC
 #endif
 END(nmi)
 