Diffstat (limited to 'arch/ia64/kernel')

-rw-r--r--  arch/ia64/kernel/fsys.S   |  4 ++--
-rw-r--r--  arch/ia64/kernel/module.c | 10 ++++++----
-rw-r--r--  arch/ia64/kernel/ptrace.c |  6 ++++++
-rw-r--r--  arch/ia64/kernel/setup.c  |  3 ++-
-rw-r--r--  arch/ia64/kernel/traps.c  | 29 ++++++++++++++++++++++++++---
5 files changed, 44 insertions(+), 8 deletions(-)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 4f3cdef75797..962b6c4e32b5 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -460,9 +460,9 @@ EX(.fail_efault, ld8 r14=[r33])		// r14 <- *set
 	;;

 	st8 [r2]=r14				// update current->blocked with new mask
-	cmpxchg4.acq r14=[r9],r18,ar.ccv	// current->thread_info->flags <- r18
+	cmpxchg4.acq r8=[r9],r18,ar.ccv		// current->thread_info->flags <- r18
 	;;
-	cmp.ne p6,p0=r17,r14			// update failed?
+	cmp.ne p6,p0=r17,r8			// update failed?
(p6)	br.cond.spnt.few 1b			// yes -> retry

 #ifdef CONFIG_SMP
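
The fsys.S hunk fixes a register-reuse bug in the sigprocmask fast path: r14 holds the new signal mask that was just stored to current->blocked, but it was also the destination of cmpxchg4.acq, so a failed compare-and-exchange clobbered the mask before the retry branch to 1b. Returning the old flags value in r8 keeps r14 intact across retries. A minimal C sketch of the same retry loop, with illustrative stand-in names (the flag computation for r18 is elided, and __sync_bool_compare_and_swap stands in for cmpxchg4.acq):

	#include <stdint.h>

	static uint64_t blocked;	/* stands in for current->blocked              */
	static uint32_t flags;		/* stands in for current_thread_info()->flags */

	static void sketch_set_sigmask(uint64_t newmask)	/* newmask ~ r14 */
	{
		uint32_t old, new;

		do {
			old = flags;		/* expected value (~ r17)                */
			new = old & ~1u;	/* updated flags (~ r18); details elided */
			blocked = newmask;	/* st8 [r2]=r14                          */
			/* cmpxchg4.acq: the old value must come back in a register
			 * other than r14, or a failed attempt destroys newmask
			 * before the loop retries. */
		} while (!__sync_bool_compare_and_swap(&flags, old, new));
	}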
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index febc091c2f02..f1aca7cffd12 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -825,14 +825,16 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 	 * XXX Should have an arch-hook for running this after final section
 	 * addresses have been selected...
 	 */
-	/* See if gp can cover the entire core module: */
-	uint64_t gp = (uint64_t) mod->module_core + MAX_LTOFF / 2;
-	if (mod->core_size >= MAX_LTOFF)
+	uint64_t gp;
+	if (mod->core_size > MAX_LTOFF)
 		/*
 		 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 		 * at the end of the module.
 		 */
-		gp = (uint64_t) mod->module_core + mod->core_size - MAX_LTOFF / 2;
+		gp = mod->core_size - MAX_LTOFF / 2;
+	else
+		gp = mod->core_size / 2;
+	gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
 	mod->arch.gp = gp;
 	DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
 }
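
The module.c hunk changes how the module's gp (global pointer) is placed. gp-relative addressing reaches at most MAX_LTOFF bytes, so gp must land where every short-data reference can reach it: modules larger than that bias gp toward the tail, where SHF_ARCH_SMALL sections are allocated, while smaller modules now center it. The patch also 8-byte-aligns the offset before adding the base. A sketch of the resulting rule as a standalone function (field names simplified; the 4 MB value for MAX_LTOFF is an assumption based on the 22-bit gp-relative offset range, not a quote of this file):

	#include <stdint.h>

	#define MAX_LTOFF (1UL << 22)	/* assumed: span reachable from gp */

	static uint64_t place_gp(uint64_t module_core, uint64_t core_size)
	{
		uint64_t gp;

		if (core_size > MAX_LTOFF)
			/* short-data sections sit at the end of the module,
			 * so bias gp toward the tail */
			gp = core_size - MAX_LTOFF / 2;
		else
			/* the whole module is within reach: center gp */
			gp = core_size / 2;

		return module_core + ((gp + 7) & -8);	/* 8-byte align the offset */
	}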
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 08c8a5eb25ab..575a8f657b31 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
 {
 	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

+	/*
+	 * Prevent migrating this task while
+	 * we're fiddling with the FPU state
+	 */
+	preempt_disable();
 	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 		psr->mfh = 0;
 		task->thread.flags |= IA64_THREAD_FPH_VALID;
 		ia64_save_fpu(&task->thread.fph[0]);
 	}
+	preempt_enable();
 }

 /*
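
ia64_flush_fph() checks whether the task owns the FPU on the current CPU and, if so, saves the fph partition. Both steps are only meaningful on the CPU doing the check, so a preemption (and possible migration) between the ownership test and ia64_save_fpu() could save from the wrong CPU. The generic shape of the fix, with hypothetical helper names (only the preemption bracket is the point):

	struct task_struct;					/* opaque in this sketch */
	int  task_owns_fpu_on_this_cpu(struct task_struct *);	/* assumed helper        */
	void save_fpu_state(struct task_struct *);		/* assumed helper        */

	static void update_local_fpu_state(struct task_struct *task)
	{
		preempt_disable();			/* stay on this CPU...      */
		if (task_owns_fpu_on_this_cpu(task))	/* ...so the check...       */
			save_fpu_state(task);		/* ...and the save agree    */
		preempt_enable();			/* migration is safe again  */
	}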
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index b7e6b4cb374b..d14692e0920a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -720,7 +720,8 @@ cpu_init (void)
 	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

 	/*
-	 * Initialize default control register to defer all speculative faults.  The
+	 * Initialize default control register to defer speculative faults except
+	 * for those arising from TLB misses, which are not deferred.  The
 	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
 	 * the kernel must have recovery code for all speculative accesses).  Turn on
 	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
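
This is a comment-only correction: the DCR initialization that follows the comment sets the defer bits for most speculative fault classes but leaves dcr.dm clear, so speculative loads that miss the TLB fault immediately rather than being deferred. For reference, such an initialization looks roughly like the following; the exact bit list is an assumption (names as in asm/kregs.h), not a quote of this file:

	/* Assumed shape of the DCR setup the comment documents: every defer
	 * bit except IA64_DCR_DM (defer TLB-miss faults), plus dcr.lc. */
	ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX
				       | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD
				       | IA64_DCR_LC));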
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e82ad78081b3..1861173bd4f6 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -111,6 +111,24 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
 	siginfo_t siginfo;
 	int sig, code;

+	/* break.b always sets cr.iim to 0, which causes problems for
+	 * debuggers.  Get the real break number from the original instruction,
+	 * but only for kernel code.  User space break.b is left alone, to
+	 * preserve the existing behaviour.  All break codings have the same
+	 * format, so there is no need to check the slot type.
+	 */
+	if (break_num == 0 && !user_mode(regs)) {
+		struct ia64_psr *ipsr = ia64_psr(regs);
+		unsigned long *bundle = (unsigned long *)regs->cr_iip;
+		unsigned long slot;
+		switch (ipsr->ri) {
+		case 0: slot = (bundle[0] >> 5); break;
+		case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break;
+		default: slot = (bundle[1] >> 23); break;
+		}
+		break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);
+	}
+
 	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
 	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
 	siginfo.si_imm = break_num;
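
An IA-64 instruction bundle is 128 bits: a 5-bit template followed by three 41-bit slots, so slot 1 straddles the two 64-bit words the code loads (18 bits from bundle[0], 23 bits from bundle[1]). In every break encoding, the 21-bit immediate is imm20 in slot bits 6..25 plus the i bit at slot bit 36, which is exactly what the final line reassembles. The same decode as a standalone function (hypothetical name, userspace rendering for illustration):

	#include <stdint.h>

	/* Extract the 21-bit break immediate from a raw bundle; ri is
	 * cr.ipsr.ri, the index of the slot that trapped. */
	static unsigned long break_imm(const uint64_t bundle[2], unsigned int ri)
	{
		uint64_t slot;

		switch (ri) {
		case 0:  slot = bundle[0] >> 5; break;				/* bits 5..45 of word 0    */
		case 1:  slot = (bundle[0] >> 46) | (bundle[1] << 18); break;	/* 46..63 of word 0        */
										/* + 0..22 of word 1       */
		default: slot = bundle[1] >> 23; break;				/* bits 23..63 of word 1   */
		}
		return ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff);	/* i-bit above imm20       */
	}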
@@ -202,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)

 	/* first, grant user-level access to fph partition: */
 	psr->dfh = 0;
+
+	/*
+	 * Make sure that no other task gets in on this processor
+	 * while we're claiming the FPU
+	 */
+	preempt_disable();
 #ifndef CONFIG_SMP
 	{
 		struct task_struct *fpu_owner
 			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

-		if (ia64_is_local_fpu_owner(current))
+		if (ia64_is_local_fpu_owner(current)) {
+			preempt_enable_no_resched();
 			return;
+		}

 		if (fpu_owner)
 			ia64_flush_fph(fpu_owner);
@@ -226,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
 	 */
 	psr->mfh = 1;
 	}
+	preempt_enable_no_resched();
 }

 static inline int
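
Taken together, the traps.c preemption hunks make claiming the fph partition atomic with respect to the scheduler on this CPU: between granting access (psr->dfh = 0) and recording ownership, a preemption could hand the partition to another task. Both exits drop the preempt count with preempt_enable_no_resched(), presumably because the fault-exit path performs its own resched check; treat that rationale as an assumption. The overall shape, with hypothetical helper names:

	/* Hypothetical helpers sketching disabled_fph_fault() after the patch. */
	static void sketch_disabled_fph_fault(void)
	{
		grant_user_fph_access();		/* psr->dfh = 0                */
		preempt_disable();			/* no owner change under us    */
		if (current_owns_fpu_here()) {
			preempt_enable_no_resched();	/* already ours: nothing to do */
			return;
		}
		flush_previous_owner();			/* save old owner's fph state  */
		become_fpu_owner();			/* record current as owner     */
		load_or_init_fph();			/* restore saved state or init */
		preempt_enable_no_resched();		/* fault exit rechecks resched */
	}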