-rw-r--r--  arch/tile/include/asm/irqflags.h | 34
-rw-r--r--  arch/tile/kernel/entry.S         |  3
-rw-r--r--  arch/tile/kernel/intvec_64.S     | 78
-rw-r--r--  arch/tile/mm/init.c              |  4
-rw-r--r--  arch/tile/mm/migrate.h           |  6
-rw-r--r--  arch/tile/mm/migrate_32.S        | 36
-rw-r--r--  arch/tile/mm/migrate_64.S        | 34
7 files changed, 96 insertions(+), 99 deletions(-)
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce54284d..b4e96fef2cf8 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
  */
 #if CHIP_HAS_AUX_PERF_COUNTERS()
 #define LINUX_MASKABLE_INTERRUPTS_HI \
 	(~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
 #else
 #define LINUX_MASKABLE_INTERRUPTS_HI \
 	(~(INT_MASK_HI(INT_PERF_COUNT)))
 #endif
 
 #else
@@ -90,6 +90,14 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
 } while (0)
+#define interrupt_mask_save_mask() \
+	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+	(((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+	unsigned long long __m = (mask); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
 #else
 #define interrupt_mask_set(n) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
 	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
 #define interrupt_mask_reset_mask(mask) \
 	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+	__insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
 #endif
 
 /*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Disable all interrupts, including NMIs. */
 #define arch_local_irq_disable_all() \
-	interrupt_mask_set_mask(-1UL)
+	interrupt_mask_set_mask(-1ULL)
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 #ifdef __tilegx__
 
 #if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
 #endif
 
 /* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	mtspr SPR_INTERRUPT_MASK_SET_K, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
-	ld tmp0, tmp0; \
+	ld tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
 	mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
 
 #else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 	mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
 
 /* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
 	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
 	{ \
 	lw tmp0, tmp0; \
 	addi tmp1, tmp0, 4 \
 	}; \
-	lw tmp1, tmp1; \
+	lw tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
 	mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
 	mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
 #endif
 
+#define IRQ_ENABLE(tmp0, tmp1) \
+	IRQ_ENABLE_LOAD(tmp0, tmp1); \
+	IRQ_ENABLE_APPLY(tmp0, tmp1)
+
 /*
  * Do the CPU's IRQ-state tracing from assembly code. We call a
  * C function, but almost everywhere we do, we don't mind clobbering
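The irqflags.h additions give kernel code a way to mask every interrupt, including NMIs such as the performance counters, and later restore exactly the prior state: interrupt_mask_save_mask() reads the current mask out of the SPRs, and interrupt_mask_restore_mask() writes a saved value back. A minimal usage sketch (the critical_work() helper is hypothetical; the save/set/restore pattern itself is the one the mm/init.c hunk below adopts):

	unsigned long long irqmask;

	irqmask = interrupt_mask_save_mask();	/* capture the current interrupt mask */
	interrupt_mask_set_mask(-1ULL);		/* mask everything, NMIs included */
	critical_work();			/* hypothetical: code that must not be interrupted */
	interrupt_mask_restore_mask(irqmask);	/* put the mask back exactly as it was */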
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index ec91568df880..133c4b56a99e 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
  */
 STD_ENTRY(_cpu_idle)
 	movei r1, 1
+	IRQ_ENABLE_LOAD(r2, r3)
 	mtspr INTERRUPT_CRITICAL_SECTION, r1
-	IRQ_ENABLE(r2, r3)        /* unmask, but still with ICS set */
+	IRQ_ENABLE_APPLY(r2, r3)  /* unmask, but still with ICS set */
 	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
 _cpu_idle_nap:
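Splitting IRQ_ENABLE into IRQ_ENABLE_LOAD and IRQ_ENABLE_APPLY is what lets _cpu_idle above issue the memory load of the enabled-interrupts mask before raising INTERRUPT_CRITICAL_SECTION, so no load runs under ICS. Per the irqflags.h hunk, on tilegx the recombined IRQ_ENABLE(tmp0, tmp1) expands to roughly:

	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0)	 /* tmp0 = &interrupts_enabled_mask */
	ld	tmp0, tmp0			 /* IRQ_ENABLE_LOAD: fetch the mask */
	mtspr	SPR_INTERRUPT_MASK_RESET_K, tmp0 /* IRQ_ENABLE_APPLY: unmask */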
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e50c44..0ae8723ea578 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
 	 * This routine saves just the first four registers, plus the
 	 * stack context so we can do proper backtracing right away,
 	 * and defers to handle_interrupt to save the rest.
-	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+	 * and needs sp set to its final location at the bottom of
+	 * the stack frame.
 	 */
 	addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
 	wh64 r0    /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
 	push_reg r5, r52
 	st r52, r4
 
-	/* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
-	{
-	mfspr r20, SPR_SYSTEM_SAVE_K_0
-	moveli r21, hw2_last(__per_cpu_offset)
-	}
-	{
-	shl16insli r21, r21, hw1(__per_cpu_offset)
-	bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
-	}
-	shl16insli r21, r21, hw0(__per_cpu_offset)
-	shl3add r20, r20, r21
-	ld tp, r20
-#else
-	move tp, zero
-#endif
-
 	/*
 	 * If we will be returning to the kernel, we will need to
 	 * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
 	.endif
 	st r21, r32
 
+	/*
+	 * we've captured enough state to the stack (including in
+	 * particular our EX_CONTEXT state) that we can now release
+	 * the interrupt critical section and replace it with our
+	 * standard "interrupts disabled" mask value. This allows
+	 * synchronous interrupts (and profile interrupts) to punch
+	 * through from this point onwards.
+	 *
+	 * It's important that no code before this point touch memory
+	 * other than our own stack (to keep the invariant that this
+	 * is all that gets touched under ICS), and that no code after
+	 * this point reference any interrupt-specific SPR, in particular
+	 * the EX_CONTEXT_K_ values.
+	 */
+	.ifc \function,handle_nmi
+	IRQ_DISABLE_ALL(r20)
+	.else
+	IRQ_DISABLE(r20, r21)
+	.endif
+	mtspr INTERRUPT_CRITICAL_SECTION, zero
+
+	/* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+	{
+	mfspr r20, SPR_SYSTEM_SAVE_K_0
+	moveli r21, hw2_last(__per_cpu_offset)
+	}
+	{
+	shl16insli r21, r21, hw1(__per_cpu_offset)
+	bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+	}
+	shl16insli r21, r21, hw0(__per_cpu_offset)
+	shl3add r20, r20, r21
+	ld tp, r20
+#else
+	move tp, zero
+#endif
+
 #ifdef __COLLECT_LINKER_FEEDBACK__
 	/*
 	 * Notify the feedback routines that we were in the
@@ -513,21 +536,6 @@ intvec_\vecname:
 #endif
 
 	/*
-	 * we've captured enough state to the stack (including in
-	 * particular our EX_CONTEXT state) that we can now release
-	 * the interrupt critical section and replace it with our
-	 * standard "interrupts disabled" mask value. This allows
-	 * synchronous interrupts (and profile interrupts) to punch
-	 * through from this point onwards.
-	 */
-	.ifc \function,handle_nmi
-	IRQ_DISABLE_ALL(r20)
-	.else
-	IRQ_DISABLE(r20, r21)
-	.endif
-	mtspr INTERRUPT_CRITICAL_SECTION, zero
-
-	/*
 	 * Prepare the first 256 stack bytes to be rapidly accessible
 	 * without having to fetch the background data.
 	 */
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
 	beqzt r30, .Lrestore_regs
 	j 3f
 2:	TRACE_IRQS_ON
+	IRQ_ENABLE_LOAD(r20, r21)
 	movei r0, 1
 	mtspr INTERRUPT_CRITICAL_SECTION, r0
-	IRQ_ENABLE(r20, r21)
+	IRQ_ENABLE_APPLY(r20, r21)
 	beqzt r30, .Lrestore_regs
 3:
 
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
 	 * that will save some cycles if this turns out to be a syscall.
 	 */
 .Lrestore_regs:
-	FEEDBACK_REENTER(interrupt_return)   /* called from elsewhere */
 
 	/*
 	 * Rotate so we have one high bit and one low bit to test.
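The intvec_64.S change is essentially a reordering: releasing ICS (and installing the normal "interrupts disabled" mask) now happens before the per-cpu tp load rather than after it, since the load from __per_cpu_offset[] touches memory other than our own stack and so may not run under ICS. A condensed sketch of the new interrupt-entry sequence:

	/* all state (including EX_CONTEXT) is already saved to our own stack */
	IRQ_DISABLE(r20, r21)			 /* or IRQ_DISABLE_ALL for handle_nmi */
	mtspr	INTERRUPT_CRITICAL_SECTION, zero /* leave the critical section */
	/* only now is it safe to load tp from __per_cpu_offset[] */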
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 6a9d20ddc34f..1e4633520b35 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -444,6 +444,7 @@ static pgd_t pgtables[PTRS_PER_PGD]
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 {
+	unsigned long long irqmask;
 	unsigned long address, pfn;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -633,10 +634,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	 *  - install pgtables[] as the real page table
 	 *  - flush the TLB so the new page table takes effect
 	 */
+	irqmask = interrupt_mask_save_mask();
+	interrupt_mask_set_mask(-1ULL);
 	rc = flush_and_install_context(__pa(pgtables),
 				       init_pgprot((unsigned long)pgtables),
 				       __get_cpu_var(current_asid),
 				       cpumask_bits(my_cpu_mask));
+	interrupt_mask_restore_mask(irqmask);
 	BUG_ON(rc != 0);
 
 	/* Copy the page table back to the normal swapper_pg_dir. */
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h
index cd45a0837fa6..91683d97917e 100644
--- a/arch/tile/mm/migrate.h
+++ b/arch/tile/mm/migrate.h
@@ -24,6 +24,9 @@
 /*
  * This function is used as a helper when setting up the initial
  * page table (swapper_pg_dir).
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
  */
 extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
 				     HV_ASID asid,
@@ -39,6 +42,9 @@ extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
  *
  * Note that any non-NULL pointers must not point to the page that
  * is handled by the stack_pte itself.
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
  */
 extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
 					     size_t length, pte_t *stack_ptep,
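The comment added in both places above documents a real calling convention: the hypervisor cache flush makes the stack unusable, and merely raising ICS (as the migrate_*.S code used to do internally) is no longer treated as sufficient, so the caller must mask all interrupts first. A sketch of a conforming call, with hypothetical argument names standing in for the declared parameters:

	unsigned long long irqmask = interrupt_mask_save_mask();
	int rc;

	interrupt_mask_set_mask(-1ULL);		/* nothing may interrupt and touch the stack */
	rc = flush_and_install_context(page_table_pa, access_pte,
				       asid, cache_cpumask);
	interrupt_mask_restore_mask(irqmask);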
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
index ac01a7cdf77f..5305814bf187 100644
--- a/arch/tile/mm/migrate_32.S
+++ b/arch/tile/mm/migrate_32.S
@@ -40,8 +40,7 @@
 #define FRAME_R32 16
 #define FRAME_R33 20
 #define FRAME_R34 24
-#define FRAME_R35 28
-#define FRAME_SIZE 32
+#define FRAME_SIZE 28
 
 
 
@@ -66,12 +65,11 @@
 #define r_my_cpumask r5
 
 /* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics r30
-#define r_context_lo r31
-#define r_context_hi r32
-#define r_access_lo r33
-#define r_access_hi r34
-#define r_asid r35
+#define r_context_lo r30
+#define r_context_hi r31
+#define r_access_lo r32
+#define r_access_hi r33
+#define r_asid r34
 
 STD_ENTRY(flush_and_install_context)
 	/*
@@ -104,11 +102,7 @@ STD_ENTRY(flush_and_install_context)
 	sw r_tmp, r33
 	addi r_tmp, sp, FRAME_R34
 	}
-	{
-	sw r_tmp, r34
-	addi r_tmp, sp, FRAME_R35
-	}
-	sw r_tmp, r35
+	sw r_tmp, r34
 
 	/* Move some arguments to callee-save registers. */
 	{
@@ -121,13 +115,6 @@ STD_ENTRY(flush_and_install_context)
 	}
 	move r_asid, r_asid_in
 
-	/* Disable interrupts, since we can't use our stack. */
-	{
-	mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
-	movei r_tmp, 1
-	}
-	mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
 	/* First, flush our L2 cache. */
 	{
 	move r0, zero  /* cache_pa */
@@ -163,7 +150,7 @@ STD_ENTRY(flush_and_install_context)
 	}
 	{
 	move r4, r_asid
-	movei r5, HV_CTX_DIRECTIO
+	moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
 	}
 	jal hv_install_context
 	bnz r0, .Ldone
@@ -175,9 +162,6 @@ STD_ENTRY(flush_and_install_context)
 	}
 
 .Ldone:
-	/* Reset interrupts back how they were before. */
-	mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
 	/* Restore the callee-saved registers and return. */
 	addli lr, sp, FRAME_SIZE
 	{
@@ -202,10 +186,6 @@ STD_ENTRY(flush_and_install_context)
 	}
 	{
 	lw r34, r_tmp
-	addli r_tmp, sp, FRAME_R35
-	}
-	{
-	lw r35, r_tmp
 	addi sp, sp, FRAME_SIZE
 	}
 	jrp lr
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
index e76fea688beb..1d15b10833d1 100644
--- a/arch/tile/mm/migrate_64.S
+++ b/arch/tile/mm/migrate_64.S
@@ -38,8 +38,7 @@
 #define FRAME_R30 16
 #define FRAME_R31 24
 #define FRAME_R32 32
-#define FRAME_R33 40
-#define FRAME_SIZE 48
+#define FRAME_SIZE 40
 
 
 
@@ -60,10 +59,9 @@
 #define r_my_cpumask r3
 
 /* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics r30
-#define r_context r31
-#define r_access r32
-#define r_asid r33
+#define r_context r30
+#define r_access r31
+#define r_asid r32
 
 /*
  * Caller-save locals and frame constants are the same as
@@ -93,11 +91,7 @@ STD_ENTRY(flush_and_install_context)
 	st r_tmp, r31
 	addi r_tmp, sp, FRAME_R32
 	}
-	{
-	st r_tmp, r32
-	addi r_tmp, sp, FRAME_R33
-	}
-	st r_tmp, r33
+	st r_tmp, r32
 
 	/* Move some arguments to callee-save registers. */
 	{
@@ -106,13 +100,6 @@ STD_ENTRY(flush_and_install_context)
 	}
 	move r_asid, r_asid_in
 
-	/* Disable interrupts, since we can't use our stack. */
-	{
-	mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
-	movei r_tmp, 1
-	}
-	mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
 	/* First, flush our L2 cache. */
 	{
 	move r0, zero  /* cache_pa */
@@ -147,7 +134,7 @@ STD_ENTRY(flush_and_install_context)
 	}
 	{
 	move r2, r_asid
-	movei r3, HV_CTX_DIRECTIO
+	moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
 	}
 	jal hv_install_context
 	bnez r0, 1f
@@ -158,10 +145,7 @@ STD_ENTRY(flush_and_install_context)
 	jal hv_flush_all
 	}
 
-1:	/* Reset interrupts back how they were before. */
-	mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
-	/* Restore the callee-saved registers and return. */
+1:	/* Restore the callee-saved registers and return. */
 	addli lr, sp, FRAME_SIZE
 	{
 	ld lr, lr
@@ -177,10 +161,6 @@ STD_ENTRY(flush_and_install_context)
 	}
 	{
 	ld r32, r_tmp
-	addli r_tmp, sp, FRAME_R33
-	}
-	{
-	ld r33, r_tmp
 	addi sp, sp, FRAME_SIZE
 	}
 	jrp lr