author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/kernel/entry.S
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/kernel/entry.S')
-rw-r--r--  arch/tile/kernel/entry.S  |  50
1 file changed, 9 insertions(+), 41 deletions(-)
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 3d01383b1b0e..431e9ae60488 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -15,7 +15,9 @@
 #include <linux/linkage.h>
 #include <linux/unistd.h>
 #include <asm/irqflags.h>
+#include <asm/processor.h>
 #include <arch/abi.h>
+#include <arch/spr_def.h>
 
 #ifdef __tilegx__
 #define bnzt bnezt
@@ -25,28 +27,6 @@ STD_ENTRY(current_text_addr)
 	{ move r0, lr; jrp lr }
 	STD_ENDPROC(current_text_addr)
 
-STD_ENTRY(_sim_syscall)
-	/*
-	 * Wait for r0-r9 to be ready (and lr on the off chance we
-	 * want the syscall to locate its caller), then make a magic
-	 * simulator syscall.
-	 *
-	 * We carefully stall until the registers are readable in case they
-	 * are the target of a slow load, etc. so that tile-sim will
-	 * definitely be able to read all of them inside the magic syscall.
-	 *
-	 * Technically this is wrong for r3-r9 and lr, since an interrupt
-	 * could come in and restore the registers with a slow load right
-	 * before executing the mtspr.  We may need to modify tile-sim to
-	 * explicitly stall for this case, but we do not yet have
-	 * a way to implement such a stall.
-	 */
-	{ and zero, lr, r9 ; and zero, r8, r7 }
-	{ and zero, r6, r5 ; and zero, r4, r3 }
-	{ and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
-	{ jrp lr }
-	STD_ENDPROC(_sim_syscall)
-
 /*
  * Implement execve().  The i386 code has a note that forking from kernel
  * space results in no copy on write until the execve, so we should be
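The comment in the deleted _sim_syscall block explains the trick being removed: dummy "and zero, rX, rY" bundles read each argument register, so the hardware register interlock stalls until any slow load feeding them has completed before the magic "mtspr SIM_CONTROL, r0" executes. A loose userspace analogue, offered only as a sketch (sim_syscall_sketch and its parameters are hypothetical names, not kernel API), uses GCC inline-asm input constraints, which order the loads at the compiler level rather than stalling the pipeline:

/*
 * Sketch only: a compiler-level analogue of _sim_syscall's stall.
 * The "r" constraints force each value into a register and make the
 * (empty) asm consume it, so every load producing these values is
 * emitted before this point.  Unlike the deleted assembly, this does
 * not model the hardware pipeline interlock.
 */
static inline void sim_syscall_sketch(long a0, long a1, long a2, long a3)
{
	asm volatile("" : : "r"(a0), "r"(a1), "r"(a2), "r"(a3));
}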
@@ -58,12 +38,6 @@ STD_ENTRY(kernel_execve)
 	jrp lr
 	STD_ENDPROC(kernel_execve)
 
-/* Delay a fixed number of cycles. */
-STD_ENTRY(__delay)
-	{ addi r0, r0, -1; bnzt r0, . }
-	jrp lr
-	STD_ENDPROC(__delay)
-
 /*
  * We don't run this function directly, but instead copy it to a page
  * we map into every user process.  See vdso_setup().
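The removed __delay is a two-instruction busy-wait: decrement r0 and branch back to the same bundle until it reaches zero. A minimal C sketch of the same count-down spin (delay_loops is an illustrative name, not the kernel's generic delay API):

/*
 * Burn roughly "loops" iterations.  The empty volatile asm keeps the
 * compiler from optimizing the loop away; cycle accuracy is why the
 * assembly version bundled the decrement and branch together.
 */
static void delay_loops(unsigned long loops)
{
	while (loops--)
		asm volatile("");
}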
@@ -102,7 +76,7 @@ STD_ENTRY(KBacktraceIterator_init_current)
 STD_ENTRY(cpu_idle_on_new_stack)
 	{
 	 move sp, r1
-	 mtspr SYSTEM_SAVE_1_0, r2
+	 mtspr SPR_SYSTEM_SAVE_K_0, r2
 	}
 	jal free_thread_info
 	j cpu_idle
@@ -117,23 +91,17 @@ STD_ENTRY(smp_nap)
 
 /*
  * Enable interrupts racelessly and then nap until interrupted.
+ * Architecturally, we are guaranteed that enabling interrupts via
+ * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
  * This function's _cpu_idle_nap address is special; see intvec.S.
  * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
  * as a result return to the function that called _cpu_idle().
  */
 STD_ENTRY(_cpu_idle)
-	{
-	 lnk r0
-	 movei r1, 1
-	}
-	{
-	 addli r0, r0, _cpu_idle_nap - .
-	 mtspr INTERRUPT_CRITICAL_SECTION, r1
-	}
-	IRQ_ENABLE(r2, r3)              /* unmask, but still with ICS set */
-	mtspr EX_CONTEXT_1_1, r1        /* PL1, ICS clear */
-	mtspr EX_CONTEXT_1_0, r0
-	iret
+	movei r1, 1
+	mtspr INTERRUPT_CRITICAL_SECTION, r1
+	IRQ_ENABLE(r2, r3)              /* unmask, but still with ICS set */
+	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap
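The rewritten _cpu_idle leans on the guarantee stated in the new comment: with INTERRUPT_CRITICAL_SECTION set, IRQ_ENABLE cannot take an interrupt, and after the mtspr that clears ICS an interrupt can land no earlier than the next PC, which is the nap itself, so no wakeup is lost between enabling and sleeping. The deleted version reached the same end by iret-ing into the nap with a hand-built EX_CONTEXT. A userspace analogue of the same lost-wakeup problem, offered as a hedged sketch, is the classic sigprocmask/sigsuspend idiom: the wakeup signal stays blocked while we decide to sleep, and sigsuspend atomically unblocks it and waits.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t woken;

static void on_sigusr1(int sig) { (void)sig; woken = 1; }

int main(void)
{
	struct sigaction sa = { .sa_handler = on_sigusr1 };
	sigset_t block, old;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	/* "ICS set": hold the wakeup blocked while preparing to sleep. */
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	kill(getpid(), SIGUSR1);	/* an early wakeup: held pending, not lost */

	/* "clear ICS + nap": sigsuspend atomically unblocks SIGUSR1 and
	 * waits, so the pending signal is delivered inside the call and
	 * we return at once instead of sleeping forever. */
	while (!woken)
		sigsuspend(&old);

	printf("woke without losing the wakeup\n");
	return 0;
}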