| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 16:38:45 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 16:38:45 -0400 |
| commit | 38cb162b7585d837083b8365da1eb32687c5164c (patch) | |
| tree | e4ae15715b23b320b1a92699fac767bc766c8f0b | |
| parent | ba7cc09c9c9e29a57045dc5bbf843ac1cfad3283 (diff) | |
| parent | e180583b85f4a48bd55924712c88e5d8eb182e08 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] wire up pselect, ppoll
[IA64] Add TIF_RESTORE_SIGMASK
[IA64] unwind did not work for processes born with CLONE_STOPPED
[IA64] Optional method to purge the TLB on SN systems
[IA64] SPIN_LOCK_UNLOCKED macro cleanup in arch/ia64
[IA64-SN2][KJ] mmtimer.c-kzalloc
[IA64] fix stack alignment for ia32 signal handlers
[IA64] - Altix: hotplug after intr redirect can crash system
[IA64] save and restore cpus_allowed in cpu_idle_wait
[IA64] Removal of percpu TR cleanup in kexec code
[IA64] Fix some section mismatch errors
| -rw-r--r-- | arch/ia64/ia32/ia32_entry.S | 39 |
| -rw-r--r-- | arch/ia64/ia32/ia32_signal.c | 65 |
| -rw-r--r-- | arch/ia64/kernel/entry.S | 30 |
| -rw-r--r-- | arch/ia64/kernel/iosapic.c | 2 |
| -rw-r--r-- | arch/ia64/kernel/irq_ia64.c | 27 |
| -rw-r--r-- | arch/ia64/kernel/process.c | 8 |
| -rw-r--r-- | arch/ia64/kernel/relocate_kernel.S | 11 |
| -rw-r--r-- | arch/ia64/kernel/setup.c | 2 |
| -rw-r--r-- | arch/ia64/kernel/sigframe.h | 2 |
| -rw-r--r-- | arch/ia64/kernel/signal.c | 71 |
| -rw-r--r-- | arch/ia64/kernel/smp.c | 68 |
| -rw-r--r-- | arch/ia64/kernel/traps.c | 6 |
| -rw-r--r-- | arch/ia64/kernel/unwind.c | 9 |
| -rw-r--r-- | arch/ia64/mm/tlb.c | 6 |
| -rw-r--r-- | arch/ia64/sn/kernel/irq.c | 58 |
| -rw-r--r-- | arch/ia64/sn/kernel/sn2/sn2_smp.c | 65 |
| -rw-r--r-- | drivers/char/mmtimer.c | 4 |
| -rw-r--r-- | include/asm-ia64/hw_irq.h | 1 |
| -rw-r--r-- | include/asm-ia64/iosapic.h | 2 |
| -rw-r--r-- | include/asm-ia64/sn/sn_sal.h | 1 |
| -rw-r--r-- | include/asm-ia64/thread_info.h | 4 |
| -rw-r--r-- | include/asm-ia64/tlbflush.h | 11 |
| -rw-r--r-- | include/asm-ia64/unistd.h | 5 |
23 files changed, 276 insertions, 221 deletions
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 687e5fdc9683..99b665e2b1d5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
| @@ -52,43 +52,6 @@ ENTRY(ia32_clone) | |||
| 52 | br.ret.sptk.many rp | 52 | br.ret.sptk.many rp |
| 53 | END(ia32_clone) | 53 | END(ia32_clone) |
| 54 | 54 | ||
| 55 | ENTRY(sys32_rt_sigsuspend) | ||
| 56 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
| 57 | alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs | ||
| 58 | mov loc0=rp | ||
| 59 | mov out0=in0 // mask | ||
| 60 | mov out1=in1 // sigsetsize | ||
| 61 | mov out2=sp // out2 = &sigscratch | ||
| 62 | .fframe 16 | ||
| 63 | adds sp=-16,sp // allocate dummy "sigscratch" | ||
| 64 | ;; | ||
| 65 | .body | ||
| 66 | br.call.sptk.many rp=ia32_rt_sigsuspend | ||
| 67 | 1: .restore sp | ||
| 68 | adds sp=16,sp | ||
| 69 | mov rp=loc0 | ||
| 70 | mov ar.pfs=loc1 | ||
| 71 | br.ret.sptk.many rp | ||
| 72 | END(sys32_rt_sigsuspend) | ||
| 73 | |||
| 74 | ENTRY(sys32_sigsuspend) | ||
| 75 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
| 76 | alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs | ||
| 77 | mov loc0=rp | ||
| 78 | mov out0=in2 // mask (first two args are ignored) | ||
| 79 | ;; | ||
| 80 | mov out1=sp // out1 = &sigscratch | ||
| 81 | .fframe 16 | ||
| 82 | adds sp=-16,sp // allocate dummy "sigscratch" | ||
| 83 | .body | ||
| 84 | br.call.sptk.many rp=ia32_sigsuspend | ||
| 85 | 1: .restore sp | ||
| 86 | adds sp=16,sp | ||
| 87 | mov rp=loc0 | ||
| 88 | mov ar.pfs=loc1 | ||
| 89 | br.ret.sptk.many rp | ||
| 90 | END(sys32_sigsuspend) | ||
| 91 | |||
| 92 | GLOBAL_ENTRY(ia32_ret_from_clone) | 55 | GLOBAL_ENTRY(ia32_ret_from_clone) |
| 93 | PT_REGS_UNWIND_INFO(0) | 56 | PT_REGS_UNWIND_INFO(0) |
| 94 | { /* | 57 | { /* |
| @@ -389,7 +352,7 @@ ia32_syscall_table: | |||
| 389 | data8 sys_rt_sigpending | 352 | data8 sys_rt_sigpending |
| 390 | data8 compat_sys_rt_sigtimedwait | 353 | data8 compat_sys_rt_sigtimedwait |
| 391 | data8 sys32_rt_sigqueueinfo | 354 | data8 sys32_rt_sigqueueinfo |
| 392 | data8 sys32_rt_sigsuspend | 355 | data8 compat_sys_rt_sigsuspend |
| 393 | data8 sys32_pread /* 180 */ | 356 | data8 sys32_pread /* 180 */ |
| 394 | data8 sys32_pwrite | 357 | data8 sys32_pwrite |
| 395 | data8 sys_chown /* 16-bit version */ | 358 | data8 sys_chown /* 16-bit version */ |
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 10510e585204..85e82f32e480 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
| @@ -451,59 +451,20 @@ sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int r | |||
| 451 | sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler); | 451 | sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler); |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | long | 454 | asmlinkage long |
| 455 | __ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sigscratch *scr) | 455 | sys32_sigsuspend (int history0, int history1, old_sigset_t mask) |
| 456 | { | 456 | { |
| 457 | extern long ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall); | 457 | mask &= _BLOCKABLE; |
| 458 | sigset_t oldset, set; | ||
| 459 | |||
| 460 | scr->scratch_unat = 0; /* avoid leaking kernel bits to user level */ | ||
| 461 | memset(&set, 0, sizeof(set)); | ||
| 462 | |||
| 463 | memcpy(&set.sig, &sset->sig, sigsetsize); | ||
| 464 | |||
| 465 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
| 466 | |||
| 467 | spin_lock_irq(¤t->sighand->siglock); | 458 | spin_lock_irq(¤t->sighand->siglock); |
| 468 | { | 459 | current->saved_sigmask = current->blocked; |
| 469 | oldset = current->blocked; | 460 | siginitset(¤t->blocked, mask); |
| 470 | current->blocked = set; | 461 | recalc_sigpending(); |
| 471 | recalc_sigpending(); | ||
| 472 | } | ||
| 473 | spin_unlock_irq(¤t->sighand->siglock); | 462 | spin_unlock_irq(¤t->sighand->siglock); |
| 474 | 463 | ||
| 475 | /* | 464 | current->state = TASK_INTERRUPTIBLE; |
| 476 | * The return below usually returns to the signal handler. We need to pre-set the | 465 | schedule(); |
| 477 | * correct error code here to ensure that the right values get saved in sigcontext | 466 | set_thread_flag(TIF_RESTORE_SIGMASK); |
| 478 | * by ia64_do_signal. | 467 | return -ERESTARTNOHAND; |
| 479 | */ | ||
| 480 | scr->pt.r8 = -EINTR; | ||
| 481 | while (1) { | ||
| 482 | current->state = TASK_INTERRUPTIBLE; | ||
| 483 | schedule(); | ||
| 484 | if (ia64_do_signal(&oldset, scr, 1)) | ||
| 485 | return -EINTR; | ||
| 486 | } | ||
| 487 | } | ||
| 488 | |||
| 489 | asmlinkage long | ||
| 490 | ia32_rt_sigsuspend (compat_sigset_t __user *uset, unsigned int sigsetsize, struct sigscratch *scr) | ||
| 491 | { | ||
| 492 | compat_sigset_t set; | ||
| 493 | |||
| 494 | if (sigsetsize > sizeof(compat_sigset_t)) | ||
| 495 | return -EINVAL; | ||
| 496 | |||
| 497 | if (copy_from_user(&set.sig, &uset->sig, sigsetsize)) | ||
| 498 | return -EFAULT; | ||
| 499 | |||
| 500 | return __ia32_rt_sigsuspend(&set, sigsetsize, scr); | ||
| 501 | } | ||
| 502 | |||
| 503 | asmlinkage long | ||
| 504 | ia32_sigsuspend (unsigned int mask, struct sigscratch *scr) | ||
| 505 | { | ||
| 506 | return __ia32_rt_sigsuspend((compat_sigset_t *) &mask, sizeof(mask), scr); | ||
| 507 | } | 468 | } |
| 508 | 469 | ||
| 509 | asmlinkage long | 470 | asmlinkage long |
| @@ -810,7 +771,11 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
| 810 | } | 771 | } |
| 811 | /* Legacy stack switching not supported */ | 772 | /* Legacy stack switching not supported */ |
| 812 | 773 | ||
| 813 | return (void __user *)((esp - frame_size) & -8ul); | 774 | esp -= frame_size; |
| 775 | /* Align the stack pointer according to the i386 ABI, | ||
| 776 | * i.e. so that on function entry ((sp + 4) & 15) == 0. */ | ||
| 777 | esp = ((esp + 4) & -16ul) - 4; | ||
| 778 | return (void __user *) esp; | ||
| 814 | } | 779 | } |
| 815 | 780 | ||
| 816 | static int | 781 | static int |
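The stack-alignment change in get_sigframe() above can be checked in isolation: after `esp = ((esp + 4) & -16ul) - 4`, the i386 rule that `(sp + 4) % 16 == 0` at handler entry holds regardless of the starting value. A minimal standalone check (the starting value below is an arbitrary example, not taken from the patch):

```c
#include <stdio.h>

int main(void)
{
	unsigned long esp = 0xffffe123UL;	/* hypothetical esp after "esp -= frame_size" */

	esp = ((esp + 4) & -16UL) - 4;		/* the expression added to get_sigframe() */
	printf("esp = %#lx, (esp + 4) %% 16 = %lu\n", esp, (esp + 4) % 16);	/* prints 0 */
	return 0;
}
```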
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 55fd2d5471e1..b50bf208678e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
| @@ -1199,32 +1199,6 @@ ENTRY(notify_resume_user) | |||
| 1199 | br.ret.sptk.many rp | 1199 | br.ret.sptk.many rp |
| 1200 | END(notify_resume_user) | 1200 | END(notify_resume_user) |
| 1201 | 1201 | ||
| 1202 | GLOBAL_ENTRY(sys_rt_sigsuspend) | ||
| 1203 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) | ||
| 1204 | alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart! | ||
| 1205 | mov r9=ar.unat | ||
| 1206 | mov loc0=rp // save return address | ||
| 1207 | mov out0=in0 // mask | ||
| 1208 | mov out1=in1 // sigsetsize | ||
| 1209 | adds out2=8,sp // out2=&sigscratch->ar_pfs | ||
| 1210 | ;; | ||
| 1211 | .fframe 16 | ||
| 1212 | .spillsp ar.unat, 16 | ||
| 1213 | st8 [sp]=r9,-16 // allocate space for ar.unat and save it | ||
| 1214 | st8 [out2]=loc1,-8 // save ar.pfs, out2=&sigscratch | ||
| 1215 | .body | ||
| 1216 | br.call.sptk.many rp=ia64_rt_sigsuspend | ||
| 1217 | .ret17: .restore sp | ||
| 1218 | adds sp=16,sp // pop scratch stack space | ||
| 1219 | ;; | ||
| 1220 | ld8 r9=[sp] // load new unat from sw->caller_unat | ||
| 1221 | mov rp=loc0 | ||
| 1222 | ;; | ||
| 1223 | mov ar.unat=r9 | ||
| 1224 | mov ar.pfs=loc1 | ||
| 1225 | br.ret.sptk.many rp | ||
| 1226 | END(sys_rt_sigsuspend) | ||
| 1227 | |||
| 1228 | ENTRY(sys_rt_sigreturn) | 1202 | ENTRY(sys_rt_sigreturn) |
| 1229 | PT_REGS_UNWIND_INFO(0) | 1203 | PT_REGS_UNWIND_INFO(0) |
| 1230 | /* | 1204 | /* |
| @@ -1598,8 +1572,8 @@ sys_call_table: | |||
| 1598 | data8 sys_readlinkat | 1572 | data8 sys_readlinkat |
| 1599 | data8 sys_fchmodat | 1573 | data8 sys_fchmodat |
| 1600 | data8 sys_faccessat | 1574 | data8 sys_faccessat |
| 1601 | data8 sys_ni_syscall // reserved for pselect | 1575 | data8 sys_pselect6 |
| 1602 | data8 sys_ni_syscall // 1295 reserved for ppoll | 1576 | data8 sys_ppoll |
| 1603 | data8 sys_unshare | 1577 | data8 sys_unshare |
| 1604 | data8 sys_splice | 1578 | data8 sys_splice |
| 1605 | data8 sys_set_robust_list | 1579 | data8 sys_set_robust_list |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 93d9ab14ba24..37f46527d233 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
| @@ -1012,7 +1012,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, | |||
| 1012 | /* | 1012 | /* |
| 1013 | * ACPI calls this when it finds an entry for a legacy ISA IRQ override. | 1013 | * ACPI calls this when it finds an entry for a legacy ISA IRQ override. |
| 1014 | */ | 1014 | */ |
| 1015 | void __init | 1015 | void __devinit |
| 1016 | iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, | 1016 | iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, |
| 1017 | unsigned long polarity, | 1017 | unsigned long polarity, |
| 1018 | unsigned long trigger) | 1018 | unsigned long trigger) |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1c5044a80958..dce5341303de 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <asm/machvec.h> | 38 | #include <asm/machvec.h> |
| 39 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
| 40 | #include <asm/system.h> | 40 | #include <asm/system.h> |
| 41 | #include <asm/tlbflush.h> | ||
| 41 | 42 | ||
| 42 | #ifdef CONFIG_PERFMON | 43 | #ifdef CONFIG_PERFMON |
| 43 | # include <asm/perfmon.h> | 44 | # include <asm/perfmon.h> |
| @@ -126,8 +127,10 @@ void destroy_irq(unsigned int irq) | |||
| 126 | 127 | ||
| 127 | #ifdef CONFIG_SMP | 128 | #ifdef CONFIG_SMP |
| 128 | # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) | 129 | # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) |
| 130 | # define IS_LOCAL_TLB_FLUSH(vec) (vec == IA64_IPI_LOCAL_TLB_FLUSH) | ||
| 129 | #else | 131 | #else |
| 130 | # define IS_RESCHEDULE(vec) (0) | 132 | # define IS_RESCHEDULE(vec) (0) |
| 133 | # define IS_LOCAL_TLB_FLUSH(vec) (0) | ||
| 131 | #endif | 134 | #endif |
| 132 | /* | 135 | /* |
| 133 | * That's where the IVT branches when we get an external | 136 | * That's where the IVT branches when we get an external |
| @@ -179,8 +182,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) | |||
| 179 | saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); | 182 | saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); |
| 180 | ia64_srlz_d(); | 183 | ia64_srlz_d(); |
| 181 | while (vector != IA64_SPURIOUS_INT_VECTOR) { | 184 | while (vector != IA64_SPURIOUS_INT_VECTOR) { |
| 182 | if (unlikely(IS_RESCHEDULE(vector))) | 185 | if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { |
| 183 | kstat_this_cpu.irqs[vector]++; | 186 | smp_local_flush_tlb(); |
| 187 | kstat_this_cpu.irqs[vector]++; | ||
| 188 | } else if (unlikely(IS_RESCHEDULE(vector))) | ||
| 189 | kstat_this_cpu.irqs[vector]++; | ||
| 184 | else { | 190 | else { |
| 185 | ia64_setreg(_IA64_REG_CR_TPR, vector); | 191 | ia64_setreg(_IA64_REG_CR_TPR, vector); |
| 186 | ia64_srlz_d(); | 192 | ia64_srlz_d(); |
| @@ -226,8 +232,11 @@ void ia64_process_pending_intr(void) | |||
| 226 | * Perform normal interrupt style processing | 232 | * Perform normal interrupt style processing |
| 227 | */ | 233 | */ |
| 228 | while (vector != IA64_SPURIOUS_INT_VECTOR) { | 234 | while (vector != IA64_SPURIOUS_INT_VECTOR) { |
| 229 | if (unlikely(IS_RESCHEDULE(vector))) | 235 | if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { |
| 230 | kstat_this_cpu.irqs[vector]++; | 236 | smp_local_flush_tlb(); |
| 237 | kstat_this_cpu.irqs[vector]++; | ||
| 238 | } else if (unlikely(IS_RESCHEDULE(vector))) | ||
| 239 | kstat_this_cpu.irqs[vector]++; | ||
| 231 | else { | 240 | else { |
| 232 | struct pt_regs *old_regs = set_irq_regs(NULL); | 241 | struct pt_regs *old_regs = set_irq_regs(NULL); |
| 233 | 242 | ||
| @@ -259,12 +268,12 @@ void ia64_process_pending_intr(void) | |||
| 259 | 268 | ||
| 260 | 269 | ||
| 261 | #ifdef CONFIG_SMP | 270 | #ifdef CONFIG_SMP |
| 262 | extern irqreturn_t handle_IPI (int irq, void *dev_id); | ||
| 263 | 271 | ||
| 264 | static irqreturn_t dummy_handler (int irq, void *dev_id) | 272 | static irqreturn_t dummy_handler (int irq, void *dev_id) |
| 265 | { | 273 | { |
| 266 | BUG(); | 274 | BUG(); |
| 267 | } | 275 | } |
| 276 | extern irqreturn_t handle_IPI (int irq, void *dev_id); | ||
| 268 | 277 | ||
| 269 | static struct irqaction ipi_irqaction = { | 278 | static struct irqaction ipi_irqaction = { |
| 270 | .handler = handle_IPI, | 279 | .handler = handle_IPI, |
| @@ -277,6 +286,13 @@ static struct irqaction resched_irqaction = { | |||
| 277 | .flags = IRQF_DISABLED, | 286 | .flags = IRQF_DISABLED, |
| 278 | .name = "resched" | 287 | .name = "resched" |
| 279 | }; | 288 | }; |
| 289 | |||
| 290 | static struct irqaction tlb_irqaction = { | ||
| 291 | .handler = dummy_handler, | ||
| 292 | .flags = SA_INTERRUPT, | ||
| 293 | .name = "tlb_flush" | ||
| 294 | }; | ||
| 295 | |||
| 280 | #endif | 296 | #endif |
| 281 | 297 | ||
| 282 | void | 298 | void |
| @@ -302,6 +318,7 @@ init_IRQ (void) | |||
| 302 | #ifdef CONFIG_SMP | 318 | #ifdef CONFIG_SMP |
| 303 | register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); | 319 | register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); |
| 304 | register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); | 320 | register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction); |
| 321 | register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction); | ||
| 305 | #endif | 322 | #endif |
| 306 | #ifdef CONFIG_PERFMON | 323 | #ifdef CONFIG_PERFMON |
| 307 | pfm_init_percpu(); | 324 | pfm_init_percpu(); |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 8bb571a8a738..d1c3ed9943e5 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
| @@ -155,7 +155,7 @@ show_regs (struct pt_regs *regs) | |||
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | void | 157 | void |
| 158 | do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall) | 158 | do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall) |
| 159 | { | 159 | { |
| 160 | if (fsys_mode(current, &scr->pt)) { | 160 | if (fsys_mode(current, &scr->pt)) { |
| 161 | /* defer signal-handling etc. until we return to privilege-level 0. */ | 161 | /* defer signal-handling etc. until we return to privilege-level 0. */ |
| @@ -170,8 +170,8 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall | |||
| 170 | #endif | 170 | #endif |
| 171 | 171 | ||
| 172 | /* deal with pending signal delivery */ | 172 | /* deal with pending signal delivery */ |
| 173 | if (test_thread_flag(TIF_SIGPENDING)) | 173 | if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK)) |
| 174 | ia64_do_signal(oldset, scr, in_syscall); | 174 | ia64_do_signal(scr, in_syscall); |
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | static int pal_halt = 1; | 177 | static int pal_halt = 1; |
| @@ -236,6 +236,7 @@ void cpu_idle_wait(void) | |||
| 236 | { | 236 | { |
| 237 | unsigned int cpu, this_cpu = get_cpu(); | 237 | unsigned int cpu, this_cpu = get_cpu(); |
| 238 | cpumask_t map; | 238 | cpumask_t map; |
| 239 | cpumask_t tmp = current->cpus_allowed; | ||
| 239 | 240 | ||
| 240 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); | 241 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); |
| 241 | put_cpu(); | 242 | put_cpu(); |
| @@ -257,6 +258,7 @@ void cpu_idle_wait(void) | |||
| 257 | } | 258 | } |
| 258 | cpus_and(map, map, cpu_online_map); | 259 | cpus_and(map, map, cpu_online_map); |
| 259 | } while (!cpus_empty(map)); | 260 | } while (!cpus_empty(map)); |
| 261 | set_cpus_allowed(current, tmp); | ||
| 260 | } | 262 | } |
| 261 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 263 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
| 262 | 264 | ||
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index ae473e3f2a0d..903babd22d62 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
| @@ -94,7 +94,7 @@ GLOBAL_ENTRY(relocate_new_kernel) | |||
| 94 | 4: | 94 | 4: |
| 95 | srlz.i | 95 | srlz.i |
| 96 | ;; | 96 | ;; |
| 97 | //purge TR entry for kernel text and data | 97 | // purge TR entry for kernel text and data |
| 98 | movl r16=KERNEL_START | 98 | movl r16=KERNEL_START |
| 99 | mov r18=KERNEL_TR_PAGE_SHIFT<<2 | 99 | mov r18=KERNEL_TR_PAGE_SHIFT<<2 |
| 100 | ;; | 100 | ;; |
| @@ -104,15 +104,6 @@ GLOBAL_ENTRY(relocate_new_kernel) | |||
| 104 | srlz.i | 104 | srlz.i |
| 105 | ;; | 105 | ;; |
| 106 | 106 | ||
| 107 | // purge TR entry for percpu data | ||
| 108 | movl r16=PERCPU_ADDR | ||
| 109 | mov r18=PERCPU_PAGE_SHIFT<<2 | ||
| 110 | ;; | ||
| 111 | ptr.d r16,r18 | ||
| 112 | ;; | ||
| 113 | srlz.d | ||
| 114 | ;; | ||
| 115 | |||
| 116 | // purge TR entry for pal code | 107 | // purge TR entry for pal code |
| 117 | mov r16=in3 | 108 | mov r16=in3 |
| 118 | mov r18=IA64_GRANULE_SHIFT<<2 | 109 | mov r18=IA64_GRANULE_SHIFT<<2 |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 6e19da122ae3..9df1efe7487d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
| @@ -786,7 +786,7 @@ identify_cpu (struct cpuinfo_ia64 *c) | |||
| 786 | c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); | 786 | c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); |
| 787 | } | 787 | } |
| 788 | 788 | ||
| 789 | void | 789 | void __init |
| 790 | setup_per_cpu_areas (void) | 790 | setup_per_cpu_areas (void) |
| 791 | { | 791 | { |
| 792 | /* start_kernel() requires this... */ | 792 | /* start_kernel() requires this... */ |
diff --git a/arch/ia64/kernel/sigframe.h b/arch/ia64/kernel/sigframe.h
index 37b986cb86e0..9fd9a1933b3d 100644
--- a/arch/ia64/kernel/sigframe.h
+++ b/arch/ia64/kernel/sigframe.h
| @@ -22,4 +22,4 @@ struct sigframe { | |||
| 22 | struct sigcontext sc; | 22 | struct sigcontext sc; |
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | extern long ia64_do_signal (sigset_t *, struct sigscratch *, long); | 25 | extern void ia64_do_signal (struct sigscratch *, long); |
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 0dcd56da6001..aeec8184e862 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
| @@ -40,47 +40,6 @@ | |||
| 40 | # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) | 40 | # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) |
| 41 | #endif | 41 | #endif |
| 42 | 42 | ||
| 43 | long | ||
| 44 | ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr) | ||
| 45 | { | ||
| 46 | sigset_t oldset, set; | ||
| 47 | |||
| 48 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
| 49 | if (sigsetsize != sizeof(sigset_t)) | ||
| 50 | return -EINVAL; | ||
| 51 | |||
| 52 | if (!access_ok(VERIFY_READ, uset, sigsetsize)) | ||
| 53 | return -EFAULT; | ||
| 54 | |||
| 55 | if (GET_SIGSET(&set, uset)) | ||
| 56 | return -EFAULT; | ||
| 57 | |||
| 58 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
| 59 | |||
| 60 | spin_lock_irq(¤t->sighand->siglock); | ||
| 61 | { | ||
| 62 | oldset = current->blocked; | ||
| 63 | current->blocked = set; | ||
| 64 | recalc_sigpending(); | ||
| 65 | } | ||
| 66 | spin_unlock_irq(¤t->sighand->siglock); | ||
| 67 | |||
| 68 | /* | ||
| 69 | * The return below usually returns to the signal handler. We need to | ||
| 70 | * pre-set the correct error code here to ensure that the right values | ||
| 71 | * get saved in sigcontext by ia64_do_signal. | ||
| 72 | */ | ||
| 73 | scr->pt.r8 = EINTR; | ||
| 74 | scr->pt.r10 = -1; | ||
| 75 | |||
| 76 | while (1) { | ||
| 77 | current->state = TASK_INTERRUPTIBLE; | ||
| 78 | schedule(); | ||
| 79 | if (ia64_do_signal(&oldset, scr, 1)) | ||
| 80 | return -EINTR; | ||
| 81 | } | ||
| 82 | } | ||
| 83 | |||
| 84 | asmlinkage long | 43 | asmlinkage long |
| 85 | sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, | 44 | sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, |
| 86 | long arg3, long arg4, long arg5, long arg6, long arg7, | 45 | long arg3, long arg4, long arg5, long arg6, long arg7, |
| @@ -477,10 +436,11 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse | |||
| 477 | * Note that `init' is a special process: it doesn't get signals it doesn't want to | 436 | * Note that `init' is a special process: it doesn't get signals it doesn't want to |
| 478 | * handle. Thus you cannot kill init even with a SIGKILL even by mistake. | 437 | * handle. Thus you cannot kill init even with a SIGKILL even by mistake. |
| 479 | */ | 438 | */ |
| 480 | long | 439 | void |
| 481 | ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) | 440 | ia64_do_signal (struct sigscratch *scr, long in_syscall) |
| 482 | { | 441 | { |
| 483 | struct k_sigaction ka; | 442 | struct k_sigaction ka; |
| 443 | sigset_t *oldset; | ||
| 484 | siginfo_t info; | 444 | siginfo_t info; |
| 485 | long restart = in_syscall; | 445 | long restart = in_syscall; |
| 486 | long errno = scr->pt.r8; | 446 | long errno = scr->pt.r8; |
| @@ -492,9 +452,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) | |||
| 492 | * doing anything if so. | 452 | * doing anything if so. |
| 493 | */ | 453 | */ |
| 494 | if (!user_mode(&scr->pt)) | 454 | if (!user_mode(&scr->pt)) |
| 495 | return 0; | 455 | return; |
| 496 | 456 | ||
| 497 | if (!oldset) | 457 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
| 458 | oldset = ¤t->saved_sigmask; | ||
| 459 | else | ||
| 498 | oldset = ¤t->blocked; | 460 | oldset = ¤t->blocked; |
| 499 | 461 | ||
| 500 | /* | 462 | /* |
| @@ -557,8 +519,15 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) | |||
| 557 | * Whee! Actually deliver the signal. If the delivery failed, we need to | 519 | * Whee! Actually deliver the signal. If the delivery failed, we need to |
| 558 | * continue to iterate in this loop so we can deliver the SIGSEGV... | 520 | * continue to iterate in this loop so we can deliver the SIGSEGV... |
| 559 | */ | 521 | */ |
| 560 | if (handle_signal(signr, &ka, &info, oldset, scr)) | 522 | if (handle_signal(signr, &ka, &info, oldset, scr)) { |
| 561 | return 1; | 523 | /* a signal was successfully delivered; the saved |
| 524 | * sigmask will have been stored in the signal frame, | ||
| 525 | * and will be restored by sigreturn, so we can simply | ||
| 526 | * clear the TIF_RESTORE_SIGMASK flag */ | ||
| 527 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | ||
| 528 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
| 529 | return; | ||
| 530 | } | ||
| 562 | } | 531 | } |
| 563 | 532 | ||
| 564 | /* Did we come from a system call? */ | 533 | /* Did we come from a system call? */ |
| @@ -584,5 +553,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) | |||
| 584 | } | 553 | } |
| 585 | } | 554 | } |
| 586 | } | 555 | } |
| 587 | return 0; | 556 | |
| 557 | /* if there's no signal to deliver, we just put the saved sigmask | ||
| 558 | * back */ | ||
| 559 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | ||
| 560 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
| 561 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
| 562 | } | ||
| 588 | } | 563 | } |
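The signal.c and ia32_signal.c hunks above are the core of the TIF_RESTORE_SIGMASK conversion: a sigsuspend-style call saves the old mask in current->saved_sigmask, installs the temporary mask, and sets the flag; ia64_do_signal() then either hands the saved mask to the signal frame (clearing the flag and letting sigreturn restore it) or, when nothing is delivered, puts the saved mask back itself. A standalone toy model of that control flow — simplified stand-in types, not the kernel's:

```c
#include <stdbool.h>
#include <stdio.h>

/* toy stand-ins for current->blocked / saved_sigmask / TIF_RESTORE_SIGMASK */
struct toy_task {
	unsigned long	blocked;
	unsigned long	saved_sigmask;
	bool		restore_sigmask;
};

static void toy_sigsuspend(struct toy_task *t, unsigned long mask)
{
	t->saved_sigmask = t->blocked;	/* remember the caller's mask */
	t->blocked = mask;		/* install the temporary mask ... */
	t->restore_sigmask = true;	/* ... until the next do_signal() */
	/* the real syscall now sleeps and returns -ERESTARTNOHAND */
}

static void toy_do_signal(struct toy_task *t, bool signal_delivered)
{
	/* pick the mask that would be stored in the signal frame */
	unsigned long oldset = t->restore_sigmask ? t->saved_sigmask
						  : t->blocked;

	if (signal_delivered) {
		/* the frame holds oldset; sigreturn restores it later,
		 * so only the flag needs clearing here */
		printf("delivered with frame mask %#lx\n", oldset);
		t->restore_sigmask = false;
		return;
	}

	/* no signal to deliver: restore the saved mask ourselves */
	if (t->restore_sigmask) {
		t->restore_sigmask = false;
		t->blocked = t->saved_sigmask;
	}
}

int main(void)
{
	struct toy_task t = { .blocked = 0x3 };

	toy_sigsuspend(&t, 0x0);	/* temporarily unblock everything */
	toy_do_signal(&t, false);	/* woke up, but no signal was delivered */
	printf("blocked mask restored to %#lx\n", t.blocked);	/* 0x3 again */
	return 0;
}
```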
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 55ddd809b02d..221de3804560 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
| @@ -50,6 +50,18 @@ | |||
| 50 | #include <asm/mca.h> | 50 | #include <asm/mca.h> |
| 51 | 51 | ||
| 52 | /* | 52 | /* |
| 53 | * Note: alignment of 4 entries/cacheline was empirically determined | ||
| 54 | * to be a good tradeoff between hot cachelines & spreading the array | ||
| 55 | * across too many cacheline. | ||
| 56 | */ | ||
| 57 | static struct local_tlb_flush_counts { | ||
| 58 | unsigned int count; | ||
| 59 | } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; | ||
| 60 | |||
| 61 | static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned; | ||
| 62 | |||
| 63 | |||
| 64 | /* | ||
| 53 | * Structure and data for smp_call_function(). This is designed to minimise static memory | 65 | * Structure and data for smp_call_function(). This is designed to minimise static memory |
| 54 | * requirements. It also looks cleaner. | 66 | * requirements. It also looks cleaner. |
| 55 | */ | 67 | */ |
| @@ -248,6 +260,62 @@ smp_send_reschedule (int cpu) | |||
| 248 | platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); | 260 | platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); |
| 249 | } | 261 | } |
| 250 | 262 | ||
| 263 | /* | ||
| 264 | * Called with preeemption disabled. | ||
| 265 | */ | ||
| 266 | static void | ||
| 267 | smp_send_local_flush_tlb (int cpu) | ||
| 268 | { | ||
| 269 | platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0); | ||
| 270 | } | ||
| 271 | |||
| 272 | void | ||
| 273 | smp_local_flush_tlb(void) | ||
| 274 | { | ||
| 275 | /* | ||
| 276 | * Use atomic ops. Otherwise, the load/increment/store sequence from | ||
| 277 | * a "++" operation can have the line stolen between the load & store. | ||
| 278 | * The overhead of the atomic op in negligible in this case & offers | ||
| 279 | * significant benefit for the brief periods where lots of cpus | ||
| 280 | * are simultaneously flushing TLBs. | ||
| 281 | */ | ||
| 282 | ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq); | ||
| 283 | local_flush_tlb_all(); | ||
| 284 | } | ||
| 285 | |||
| 286 | #define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */ | ||
| 287 | |||
| 288 | void | ||
| 289 | smp_flush_tlb_cpumask(cpumask_t xcpumask) | ||
| 290 | { | ||
| 291 | unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts); | ||
| 292 | cpumask_t cpumask = xcpumask; | ||
| 293 | int mycpu, cpu, flush_mycpu = 0; | ||
| 294 | |||
| 295 | preempt_disable(); | ||
| 296 | mycpu = smp_processor_id(); | ||
| 297 | |||
| 298 | for_each_cpu_mask(cpu, cpumask) | ||
| 299 | counts[cpu] = local_tlb_flush_counts[cpu].count; | ||
| 300 | |||
| 301 | mb(); | ||
| 302 | for_each_cpu_mask(cpu, cpumask) { | ||
| 303 | if (cpu == mycpu) | ||
| 304 | flush_mycpu = 1; | ||
| 305 | else | ||
| 306 | smp_send_local_flush_tlb(cpu); | ||
| 307 | } | ||
| 308 | |||
| 309 | if (flush_mycpu) | ||
| 310 | smp_local_flush_tlb(); | ||
| 311 | |||
| 312 | for_each_cpu_mask(cpu, cpumask) | ||
| 313 | while(counts[cpu] == local_tlb_flush_counts[cpu].count) | ||
| 314 | udelay(FLUSH_DELAY); | ||
| 315 | |||
| 316 | preempt_enable(); | ||
| 317 | } | ||
| 318 | |||
| 251 | void | 319 | void |
| 252 | smp_flush_tlb_all (void) | 320 | smp_flush_tlb_all (void) |
| 253 | { | 321 | { |
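smp_flush_tlb_cpumask() above works as a counter handshake: snapshot each target CPU's local_tlb_flush_counts entry, send the IA64_IPI_LOCAL_TLB_FLUSH IPI, then spin with a 5 µs backoff until every counter has advanced. Below is a userspace analogue with C11 atomics and pthreads, offered only as a sketch — threads stand in for CPUs, a polled flag stands in for the IPI, and the kernel's own-CPU fast path is omitted:

```c
/* build with: cc -pthread <file>.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_uint flush_count[NCPUS];		/* models local_tlb_flush_counts[] */
static atomic_int  flush_request[NCPUS];	/* models the local-flush IPI */

static void *cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	for (;;) {
		if (atomic_exchange(&flush_request[cpu], 0))
			/* "local TLB flush" done, advance the counter */
			atomic_fetch_add(&flush_count[cpu], 1);
		usleep(100);
	}
	return NULL;
}

static void toy_flush_tlb_cpumask(unsigned long mask)
{
	unsigned int snap[NCPUS];
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* snapshot the counters */
		if (mask & (1UL << cpu))
			snap[cpu] = atomic_load(&flush_count[cpu]);

	for (cpu = 0; cpu < NCPUS; cpu++)	/* "send the IPIs" */
		if (mask & (1UL << cpu))
			atomic_store(&flush_request[cpu], 1);

	for (cpu = 0; cpu < NCPUS; cpu++)	/* wait for each counter to move */
		if (mask & (1UL << cpu))
			while (atomic_load(&flush_count[cpu]) == snap[cpu])
				usleep(5);	/* cf. FLUSH_DELAY backoff */
}

int main(void)
{
	pthread_t tid[NCPUS];
	long cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		pthread_create(&tid[cpu], NULL, cpu_thread, (void *)cpu);

	toy_flush_tlb_cpumask(0xfUL);
	printf("all requested CPUs completed a local flush\n");
	return 0;
}
```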
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 5bfb8be02b70..b8e0d70bf989 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
| @@ -43,9 +43,9 @@ die (const char *str, struct pt_regs *regs, long err) | |||
| 43 | u32 lock_owner; | 43 | u32 lock_owner; |
| 44 | int lock_owner_depth; | 44 | int lock_owner_depth; |
| 45 | } die = { | 45 | } die = { |
| 46 | .lock = SPIN_LOCK_UNLOCKED, | 46 | .lock = __SPIN_LOCK_UNLOCKED(die.lock), |
| 47 | .lock_owner = -1, | 47 | .lock_owner = -1, |
| 48 | .lock_owner_depth = 0 | 48 | .lock_owner_depth = 0 |
| 49 | }; | 49 | }; |
| 50 | static int die_counter; | 50 | static int die_counter; |
| 51 | int cpu = get_cpu(); | 51 | int cpu = get_cpu(); |
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 93d5a3b41f69..fe1426266b9b 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
| @@ -60,6 +60,7 @@ | |||
| 60 | # define UNW_DEBUG_ON(n) unw_debug_level >= n | 60 | # define UNW_DEBUG_ON(n) unw_debug_level >= n |
| 61 | /* Do not code a printk level, not all debug lines end in newline */ | 61 | /* Do not code a printk level, not all debug lines end in newline */ |
| 62 | # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) | 62 | # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) |
| 63 | # undef inline | ||
| 63 | # define inline | 64 | # define inline |
| 64 | #else /* !UNW_DEBUG */ | 65 | #else /* !UNW_DEBUG */ |
| 65 | # define UNW_DEBUG_ON(n) 0 | 66 | # define UNW_DEBUG_ON(n) 0 |
| @@ -145,7 +146,7 @@ static struct { | |||
| 145 | # endif | 146 | # endif |
| 146 | } unw = { | 147 | } unw = { |
| 147 | .tables = &unw.kernel_table, | 148 | .tables = &unw.kernel_table, |
| 148 | .lock = SPIN_LOCK_UNLOCKED, | 149 | .lock = __SPIN_LOCK_UNLOCKED(unw.lock), |
| 149 | .save_order = { | 150 | .save_order = { |
| 150 | UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, | 151 | UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, |
| 151 | UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR | 152 | UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR |
| @@ -1943,9 +1944,9 @@ EXPORT_SYMBOL(unw_unwind); | |||
| 1943 | int | 1944 | int |
| 1944 | unw_unwind_to_user (struct unw_frame_info *info) | 1945 | unw_unwind_to_user (struct unw_frame_info *info) |
| 1945 | { | 1946 | { |
| 1946 | unsigned long ip, sp, pr = 0; | 1947 | unsigned long ip, sp, pr = info->pr; |
| 1947 | 1948 | ||
| 1948 | while (unw_unwind(info) >= 0) { | 1949 | do { |
| 1949 | unw_get_sp(info, &sp); | 1950 | unw_get_sp(info, &sp); |
| 1950 | if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) | 1951 | if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) |
| 1951 | < IA64_PT_REGS_SIZE) { | 1952 | < IA64_PT_REGS_SIZE) { |
| @@ -1963,7 +1964,7 @@ unw_unwind_to_user (struct unw_frame_info *info) | |||
| 1963 | __FUNCTION__, ip); | 1964 | __FUNCTION__, ip); |
| 1964 | return -1; | 1965 | return -1; |
| 1965 | } | 1966 | } |
| 1966 | } | 1967 | } while (unw_unwind(info) >= 0); |
| 1967 | unw_get_ip(info, &ip); | 1968 | unw_get_ip(info, &ip); |
| 1968 | UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", | 1969 | UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", |
| 1969 | __FUNCTION__, ip); | 1970 | __FUNCTION__, ip); |
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ffad7624436c..fa4e6d4810f3 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
| @@ -32,9 +32,9 @@ static struct { | |||
| 32 | } purge; | 32 | } purge; |
| 33 | 33 | ||
| 34 | struct ia64_ctx ia64_ctx = { | 34 | struct ia64_ctx ia64_ctx = { |
| 35 | .lock = SPIN_LOCK_UNLOCKED, | 35 | .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock), |
| 36 | .next = 1, | 36 | .next = 1, |
| 37 | .max_ctx = ~0U | 37 | .max_ctx = ~0U |
| 38 | }; | 38 | }; |
| 39 | 39 | ||
| 40 | DEFINE_PER_CPU(u8, ia64_need_tlb_flush); | 40 | DEFINE_PER_CPU(u8, ia64_need_tlb_flush); |
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 8d2a1bfbfe7c..7f6d2360a262 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
| @@ -59,6 +59,22 @@ void sn_intr_free(nasid_t local_nasid, int local_widget, | |||
| 59 | (u64) sn_irq_info->irq_cookie, 0, 0); | 59 | (u64) sn_irq_info->irq_cookie, 0, 0); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | u64 sn_intr_redirect(nasid_t local_nasid, int local_widget, | ||
| 63 | struct sn_irq_info *sn_irq_info, | ||
| 64 | nasid_t req_nasid, int req_slice) | ||
| 65 | { | ||
| 66 | struct ia64_sal_retval ret_stuff; | ||
| 67 | ret_stuff.status = 0; | ||
| 68 | ret_stuff.v0 = 0; | ||
| 69 | |||
| 70 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, | ||
| 71 | (u64) SAL_INTR_REDIRECT, (u64) local_nasid, | ||
| 72 | (u64) local_widget, __pa(sn_irq_info), | ||
| 73 | (u64) req_nasid, (u64) req_slice, 0); | ||
| 74 | |||
| 75 | return ret_stuff.status; | ||
| 76 | } | ||
| 77 | |||
| 62 | static unsigned int sn_startup_irq(unsigned int irq) | 78 | static unsigned int sn_startup_irq(unsigned int irq) |
| 63 | { | 79 | { |
| 64 | return 0; | 80 | return 0; |
| @@ -127,15 +143,8 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, | |||
| 127 | struct sn_irq_info *new_irq_info; | 143 | struct sn_irq_info *new_irq_info; |
| 128 | struct sn_pcibus_provider *pci_provider; | 144 | struct sn_pcibus_provider *pci_provider; |
| 129 | 145 | ||
| 130 | new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); | 146 | bridge = (u64) sn_irq_info->irq_bridge; |
| 131 | if (new_irq_info == NULL) | ||
| 132 | return NULL; | ||
| 133 | |||
| 134 | memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); | ||
| 135 | |||
| 136 | bridge = (u64) new_irq_info->irq_bridge; | ||
| 137 | if (!bridge) { | 147 | if (!bridge) { |
| 138 | kfree(new_irq_info); | ||
| 139 | return NULL; /* irq is not a device interrupt */ | 148 | return NULL; /* irq is not a device interrupt */ |
| 140 | } | 149 | } |
| 141 | 150 | ||
| @@ -145,8 +154,25 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, | |||
| 145 | local_widget = TIO_SWIN_WIDGETNUM(bridge); | 154 | local_widget = TIO_SWIN_WIDGETNUM(bridge); |
| 146 | else | 155 | else |
| 147 | local_widget = SWIN_WIDGETNUM(bridge); | 156 | local_widget = SWIN_WIDGETNUM(bridge); |
| 148 | |||
| 149 | vector = sn_irq_info->irq_irq; | 157 | vector = sn_irq_info->irq_irq; |
| 158 | |||
| 159 | /* Make use of SAL_INTR_REDIRECT if PROM supports it */ | ||
| 160 | status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice); | ||
| 161 | if (!status) { | ||
| 162 | new_irq_info = sn_irq_info; | ||
| 163 | goto finish_up; | ||
| 164 | } | ||
| 165 | |||
| 166 | /* | ||
| 167 | * PROM does not support SAL_INTR_REDIRECT, or it failed. | ||
| 168 | * Revert to old method. | ||
| 169 | */ | ||
| 170 | new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); | ||
| 171 | if (new_irq_info == NULL) | ||
| 172 | return NULL; | ||
| 173 | |||
| 174 | memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); | ||
| 175 | |||
| 150 | /* Free the old PROM new_irq_info structure */ | 176 | /* Free the old PROM new_irq_info structure */ |
| 151 | sn_intr_free(local_nasid, local_widget, new_irq_info); | 177 | sn_intr_free(local_nasid, local_widget, new_irq_info); |
| 152 | unregister_intr_pda(new_irq_info); | 178 | unregister_intr_pda(new_irq_info); |
| @@ -162,11 +188,18 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, | |||
| 162 | return NULL; | 188 | return NULL; |
| 163 | } | 189 | } |
| 164 | 190 | ||
| 191 | register_intr_pda(new_irq_info); | ||
| 192 | spin_lock(&sn_irq_info_lock); | ||
| 193 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); | ||
| 194 | spin_unlock(&sn_irq_info_lock); | ||
| 195 | call_rcu(&sn_irq_info->rcu, sn_irq_info_free); | ||
| 196 | |||
| 197 | |||
| 198 | finish_up: | ||
| 165 | /* Update kernels new_irq_info with new target info */ | 199 | /* Update kernels new_irq_info with new target info */ |
| 166 | cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid, | 200 | cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid, |
| 167 | new_irq_info->irq_slice); | 201 | new_irq_info->irq_slice); |
| 168 | new_irq_info->irq_cpuid = cpuid; | 202 | new_irq_info->irq_cpuid = cpuid; |
| 169 | register_intr_pda(new_irq_info); | ||
| 170 | 203 | ||
| 171 | pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; | 204 | pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; |
| 172 | 205 | ||
| @@ -178,11 +211,6 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, | |||
| 178 | pci_provider && pci_provider->target_interrupt) | 211 | pci_provider && pci_provider->target_interrupt) |
| 179 | (pci_provider->target_interrupt)(new_irq_info); | 212 | (pci_provider->target_interrupt)(new_irq_info); |
| 180 | 213 | ||
| 181 | spin_lock(&sn_irq_info_lock); | ||
| 182 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); | ||
| 183 | spin_unlock(&sn_irq_info_lock); | ||
| 184 | call_rcu(&sn_irq_info->rcu, sn_irq_info_free); | ||
| 185 | |||
| 186 | #ifdef CONFIG_SMP | 214 | #ifdef CONFIG_SMP |
| 187 | cpuphys = cpu_physical_id(cpuid); | 215 | cpuphys = cpu_physical_id(cpuid); |
| 188 | set_irq_affinity_info((vector & 0xff), cpuphys, 0); | 216 | set_irq_affinity_info((vector & 0xff), cpuphys, 0); |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 601747b1e22a..5d318b579fb1 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
| @@ -46,6 +46,9 @@ DECLARE_PER_CPU(struct ptc_stats, ptcstats); | |||
| 46 | 46 | ||
| 47 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); | 47 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); |
| 48 | 48 | ||
| 49 | /* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */ | ||
| 50 | static int sn2_flush_opt = 0; | ||
| 51 | |||
| 49 | extern unsigned long | 52 | extern unsigned long |
| 50 | sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, | 53 | sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, |
| 51 | volatile unsigned long *, unsigned long, | 54 | volatile unsigned long *, unsigned long, |
| @@ -76,6 +79,8 @@ struct ptc_stats { | |||
| 76 | unsigned long shub_itc_clocks; | 79 | unsigned long shub_itc_clocks; |
| 77 | unsigned long shub_itc_clocks_max; | 80 | unsigned long shub_itc_clocks_max; |
| 78 | unsigned long shub_ptc_flushes_not_my_mm; | 81 | unsigned long shub_ptc_flushes_not_my_mm; |
| 82 | unsigned long shub_ipi_flushes; | ||
| 83 | unsigned long shub_ipi_flushes_itc_clocks; | ||
| 79 | }; | 84 | }; |
| 80 | 85 | ||
| 81 | #define sn2_ptctest 0 | 86 | #define sn2_ptctest 0 |
| @@ -121,6 +126,18 @@ void sn_tlb_migrate_finish(struct mm_struct *mm) | |||
| 121 | flush_tlb_mm(mm); | 126 | flush_tlb_mm(mm); |
| 122 | } | 127 | } |
| 123 | 128 | ||
| 129 | static void | ||
| 130 | sn2_ipi_flush_all_tlb(struct mm_struct *mm) | ||
| 131 | { | ||
| 132 | unsigned long itc; | ||
| 133 | |||
| 134 | itc = ia64_get_itc(); | ||
| 135 | smp_flush_tlb_cpumask(mm->cpu_vm_mask); | ||
| 136 | itc = ia64_get_itc() - itc; | ||
| 137 | __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; | ||
| 138 | __get_cpu_var(ptcstats).shub_ipi_flushes++; | ||
| 139 | } | ||
| 140 | |||
| 124 | /** | 141 | /** |
| 125 | * sn2_global_tlb_purge - globally purge translation cache of virtual address range | 142 | * sn2_global_tlb_purge - globally purge translation cache of virtual address range |
| 126 | * @mm: mm_struct containing virtual address range | 143 | * @mm: mm_struct containing virtual address range |
| @@ -154,7 +171,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
| 154 | unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0; | 171 | unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0; |
| 155 | short nasids[MAX_NUMNODES], nix; | 172 | short nasids[MAX_NUMNODES], nix; |
| 156 | nodemask_t nodes_flushed; | 173 | nodemask_t nodes_flushed; |
| 157 | int active, max_active, deadlock; | 174 | int active, max_active, deadlock, flush_opt = sn2_flush_opt; |
| 175 | |||
| 176 | if (flush_opt > 2) { | ||
| 177 | sn2_ipi_flush_all_tlb(mm); | ||
| 178 | return; | ||
| 179 | } | ||
| 158 | 180 | ||
| 159 | nodes_clear(nodes_flushed); | 181 | nodes_clear(nodes_flushed); |
| 160 | i = 0; | 182 | i = 0; |
| @@ -189,6 +211,12 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
| 189 | return; | 211 | return; |
| 190 | } | 212 | } |
| 191 | 213 | ||
| 214 | if (flush_opt == 2) { | ||
| 215 | sn2_ipi_flush_all_tlb(mm); | ||
| 216 | preempt_enable(); | ||
| 217 | return; | ||
| 218 | } | ||
| 219 | |||
| 192 | itc = ia64_get_itc(); | 220 | itc = ia64_get_itc(); |
| 193 | nix = 0; | 221 | nix = 0; |
| 194 | for_each_node_mask(cnode, nodes_flushed) | 222 | for_each_node_mask(cnode, nodes_flushed) |
| @@ -256,6 +284,8 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
| 256 | } | 284 | } |
| 257 | if (active >= max_active || i == (nix - 1)) { | 285 | if (active >= max_active || i == (nix - 1)) { |
| 258 | if ((deadlock = wait_piowc())) { | 286 | if ((deadlock = wait_piowc())) { |
| 287 | if (flush_opt == 1) | ||
| 288 | goto done; | ||
| 259 | sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1); | 289 | sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1); |
| 260 | if (reset_max_active_on_deadlock()) | 290 | if (reset_max_active_on_deadlock()) |
| 261 | max_active = 1; | 291 | max_active = 1; |
| @@ -267,6 +297,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
| 267 | start += (1UL << nbits); | 297 | start += (1UL << nbits); |
| 268 | } while (start < end); | 298 | } while (start < end); |
| 269 | 299 | ||
| 300 | done: | ||
| 270 | itc2 = ia64_get_itc() - itc2; | 301 | itc2 = ia64_get_itc() - itc2; |
| 271 | __get_cpu_var(ptcstats).shub_itc_clocks += itc2; | 302 | __get_cpu_var(ptcstats).shub_itc_clocks += itc2; |
| 272 | if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) | 303 | if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) |
| @@ -279,6 +310,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, | |||
| 279 | 310 | ||
| 280 | spin_unlock_irqrestore(PTC_LOCK(shub1), flags); | 311 | spin_unlock_irqrestore(PTC_LOCK(shub1), flags); |
| 281 | 312 | ||
| 313 | if (flush_opt == 1 && deadlock) { | ||
| 314 | __get_cpu_var(ptcstats).deadlocks++; | ||
| 315 | sn2_ipi_flush_all_tlb(mm); | ||
| 316 | } | ||
| 317 | |||
| 282 | preempt_enable(); | 318 | preempt_enable(); |
| 283 | } | 319 | } |
| 284 | 320 | ||
| @@ -425,24 +461,42 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) | |||
| 425 | 461 | ||
| 426 | if (!cpu) { | 462 | if (!cpu) { |
| 427 | seq_printf(file, | 463 | seq_printf(file, |
| 428 | "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n"); | 464 | "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n"); |
| 429 | seq_printf(file, "# ptctest %d\n", sn2_ptctest); | 465 | seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); |
| 430 | } | 466 | } |
| 431 | 467 | ||
| 432 | if (cpu < NR_CPUS && cpu_online(cpu)) { | 468 | if (cpu < NR_CPUS && cpu_online(cpu)) { |
| 433 | stat = &per_cpu(ptcstats, cpu); | 469 | stat = &per_cpu(ptcstats, cpu); |
| 434 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | 470 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, |
| 435 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | 471 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, |
| 436 | stat->deadlocks, | 472 | stat->deadlocks, |
| 437 | 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | 473 | 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, |
| 438 | 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | 474 | 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, |
| 439 | 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec, | 475 | 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec, |
| 440 | stat->shub_ptc_flushes_not_my_mm, | 476 | stat->shub_ptc_flushes_not_my_mm, |
| 441 | stat->deadlocks2); | 477 | stat->deadlocks2, |
| 478 | stat->shub_ipi_flushes, | ||
| 479 | 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec); | ||
| 442 | } | 480 | } |
| 443 | return 0; | 481 | return 0; |
| 444 | } | 482 | } |
| 445 | 483 | ||
| 484 | static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data) | ||
| 485 | { | ||
| 486 | int cpu; | ||
| 487 | char optstr[64]; | ||
| 488 | |||
| 489 | if (copy_from_user(optstr, user, count)) | ||
| 490 | return -EFAULT; | ||
| 491 | optstr[count - 1] = '\0'; | ||
| 492 | sn2_flush_opt = simple_strtoul(optstr, NULL, 0); | ||
| 493 | |||
| 494 | for_each_online_cpu(cpu) | ||
| 495 | memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); | ||
| 496 | |||
| 497 | return count; | ||
| 498 | } | ||
| 499 | |||
| 446 | static struct seq_operations sn2_ptc_seq_ops = { | 500 | static struct seq_operations sn2_ptc_seq_ops = { |
| 447 | .start = sn2_ptc_seq_start, | 501 | .start = sn2_ptc_seq_start, |
| 448 | .next = sn2_ptc_seq_next, | 502 | .next = sn2_ptc_seq_next, |
| @@ -458,6 +512,7 @@ static int sn2_ptc_proc_open(struct inode *inode, struct file *file) | |||
| 458 | static const struct file_operations proc_sn2_ptc_operations = { | 512 | static const struct file_operations proc_sn2_ptc_operations = { |
| 459 | .open = sn2_ptc_proc_open, | 513 | .open = sn2_ptc_proc_open, |
| 460 | .read = seq_read, | 514 | .read = seq_read, |
| 515 | .write = sn2_ptc_proc_write, | ||
| 461 | .llseek = seq_lseek, | 516 | .llseek = seq_lseek, |
| 462 | .release = seq_release, | 517 | .release = seq_release, |
| 463 | }; | 518 | }; |
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index c09160383a53..6e55cfb9c65a 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
| @@ -705,15 +705,13 @@ static int __init mmtimer_init(void) | |||
| 705 | maxn++; | 705 | maxn++; |
| 706 | 706 | ||
| 707 | /* Allocate list of node ptrs to mmtimer_t's */ | 707 | /* Allocate list of node ptrs to mmtimer_t's */ |
| 708 | timers = kmalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); | 708 | timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); |
| 709 | if (timers == NULL) { | 709 | if (timers == NULL) { |
| 710 | printk(KERN_ERR "%s: failed to allocate memory for device\n", | 710 | printk(KERN_ERR "%s: failed to allocate memory for device\n", |
| 711 | MMTIMER_NAME); | 711 | MMTIMER_NAME); |
| 712 | goto out3; | 712 | goto out3; |
| 713 | } | 713 | } |
| 714 | 714 | ||
| 715 | memset(timers,0,(sizeof(mmtimer_t *)*maxn)); | ||
| 716 | |||
| 717 | /* Allocate mmtimer_t's for each online node */ | 715 | /* Allocate mmtimer_t's for each online node */ |
| 718 | for_each_online_node(node) { | 716 | for_each_online_node(node) { |
| 719 | timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); | 717 | timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); |
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 27f9df6b9145..c054d7a9aaa7 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
| @@ -66,6 +66,7 @@ extern int ia64_last_device_vector; | |||
| 66 | #define IA64_PERFMON_VECTOR 0xee /* performanc monitor interrupt vector */ | 66 | #define IA64_PERFMON_VECTOR 0xee /* performanc monitor interrupt vector */ |
| 67 | #define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */ | 67 | #define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */ |
| 68 | #define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */ | 68 | #define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */ |
| 69 | #define IA64_IPI_LOCAL_TLB_FLUSH 0xfc /* SMP flush local TLB */ | ||
| 69 | #define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */ | 70 | #define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */ |
| 70 | #define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */ | 71 | #define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */ |
| 71 | 72 | ||
diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h
index 20f98f1751a1..421cb6b62a7c 100644
--- a/include/asm-ia64/iosapic.h
+++ b/include/asm-ia64/iosapic.h
| @@ -83,7 +83,7 @@ extern int gsi_to_irq (unsigned int gsi); | |||
| 83 | extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, | 83 | extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, |
| 84 | unsigned long trigger); | 84 | unsigned long trigger); |
| 85 | extern void iosapic_unregister_intr (unsigned int irq); | 85 | extern void iosapic_unregister_intr (unsigned int irq); |
| 86 | extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, | 86 | extern void __devinit iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, |
| 87 | unsigned long polarity, | 87 | unsigned long polarity, |
| 88 | unsigned long trigger); | 88 | unsigned long trigger); |
| 89 | extern int __init iosapic_register_platform_intr (u32 int_type, | 89 | extern int __init iosapic_register_platform_intr (u32 int_type, |
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 2c4004eb5a68..291e8ceed6e6 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
| @@ -106,6 +106,7 @@ | |||
| 106 | /* interrupt handling */ | 106 | /* interrupt handling */ |
| 107 | #define SAL_INTR_ALLOC 1 | 107 | #define SAL_INTR_ALLOC 1 |
| 108 | #define SAL_INTR_FREE 2 | 108 | #define SAL_INTR_FREE 2 |
| 109 | #define SAL_INTR_REDIRECT 3 | ||
| 109 | 110 | ||
| 110 | /* | 111 | /* |
| 111 | * operations available on the generic SN_SAL_SYSCTL_OP | 112 | * operations available on the generic SN_SAL_SYSCTL_OP |
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index d28147506585..7d0241db622b 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
| @@ -85,6 +85,7 @@ struct thread_info { | |||
| 85 | #define TIF_SYSCALL_TRACE 3 /* syscall trace active */ | 85 | #define TIF_SYSCALL_TRACE 3 /* syscall trace active */ |
| 86 | #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ | 86 | #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ |
| 87 | #define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */ | 87 | #define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */ |
| 88 | #define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */ | ||
| 88 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ | 89 | #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ |
| 89 | #define TIF_MEMDIE 17 | 90 | #define TIF_MEMDIE 17 |
| 90 | #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ | 91 | #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ |
| @@ -96,6 +97,7 @@ struct thread_info { | |||
| 96 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 97 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
| 97 | #define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP) | 98 | #define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP) |
| 98 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 99 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
| 100 | #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) | ||
| 99 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 101 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
| 100 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 102 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
| 101 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | 103 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) |
| @@ -104,7 +106,7 @@ struct thread_info { | |||
| 104 | #define _TIF_FREEZE (1 << TIF_FREEZE) | 106 | #define _TIF_FREEZE (1 << TIF_FREEZE) |
| 105 | 107 | ||
| 106 | /* "work to do on user-return" bits */ | 108 | /* "work to do on user-return" bits */ |
| 107 | #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) | 109 | #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_RESTORE_SIGMASK) |
| 108 | /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ | 110 | /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ |
| 109 | #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) | 111 | #define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) |
| 110 | 112 | ||
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index cf9acb9bb1fb..e37f9fbf33af 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
| @@ -27,9 +27,11 @@ extern void local_flush_tlb_all (void); | |||
| 27 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_SMP |
| 28 | extern void smp_flush_tlb_all (void); | 28 | extern void smp_flush_tlb_all (void); |
| 29 | extern void smp_flush_tlb_mm (struct mm_struct *mm); | 29 | extern void smp_flush_tlb_mm (struct mm_struct *mm); |
| 30 | extern void smp_flush_tlb_cpumask (cpumask_t xcpumask); | ||
| 30 | # define flush_tlb_all() smp_flush_tlb_all() | 31 | # define flush_tlb_all() smp_flush_tlb_all() |
| 31 | #else | 32 | #else |
| 32 | # define flush_tlb_all() local_flush_tlb_all() | 33 | # define flush_tlb_all() local_flush_tlb_all() |
| 34 | # define smp_flush_tlb_cpumask(m) local_flush_tlb_all() | ||
| 33 | #endif | 35 | #endif |
| 34 | 36 | ||
| 35 | static inline void | 37 | static inline void |
| @@ -94,6 +96,15 @@ flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end | |||
| 94 | */ | 96 | */ |
| 95 | } | 97 | } |
| 96 | 98 | ||
| 99 | /* | ||
| 100 | * Flush the local TLB. Invoked from another cpu using an IPI. | ||
| 101 | */ | ||
| 102 | #ifdef CONFIG_SMP | ||
| 103 | void smp_local_flush_tlb(void); | ||
| 104 | #else | ||
| 105 | #define smp_local_flush_tlb() | ||
| 106 | #endif | ||
| 107 | |||
| 97 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ | 108 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ |
| 98 | 109 | ||
| 99 | #endif /* _ASM_IA64_TLBFLUSH_H */ | 110 | #endif /* _ASM_IA64_TLBFLUSH_H */ |
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index a9e1fa4cac4d..861c8ec87b09 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
| @@ -283,7 +283,8 @@ | |||
| 283 | #define __NR_readlinkat 1291 | 283 | #define __NR_readlinkat 1291 |
| 284 | #define __NR_fchmodat 1292 | 284 | #define __NR_fchmodat 1292 |
| 285 | #define __NR_faccessat 1293 | 285 | #define __NR_faccessat 1293 |
| 286 | /* 1294, 1295 reserved for pselect/ppoll */ | 286 | #define __NR_pselect6 1294 |
| 287 | #define __NR_ppoll 1295 | ||
| 287 | #define __NR_unshare 1296 | 288 | #define __NR_unshare 1296 |
| 288 | #define __NR_splice 1297 | 289 | #define __NR_splice 1297 |
| 289 | #define __NR_set_robust_list 1298 | 290 | #define __NR_set_robust_list 1298 |
| @@ -300,6 +301,7 @@ | |||
| 300 | #define NR_syscalls 281 /* length of syscall table */ | 301 | #define NR_syscalls 281 /* length of syscall table */ |
| 301 | 302 | ||
| 302 | #define __ARCH_WANT_SYS_RT_SIGACTION | 303 | #define __ARCH_WANT_SYS_RT_SIGACTION |
| 304 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | ||
| 303 | 305 | ||
| 304 | #ifdef CONFIG_IA32_SUPPORT | 306 | #ifdef CONFIG_IA32_SUPPORT |
| 305 | # define __ARCH_WANT_SYS_FADVISE64 | 307 | # define __ARCH_WANT_SYS_FADVISE64 |
| @@ -310,6 +312,7 @@ | |||
| 310 | # define __ARCH_WANT_SYS_OLDUMOUNT | 312 | # define __ARCH_WANT_SYS_OLDUMOUNT |
| 311 | # define __ARCH_WANT_SYS_SIGPENDING | 313 | # define __ARCH_WANT_SYS_SIGPENDING |
| 312 | # define __ARCH_WANT_SYS_SIGPROCMASK | 314 | # define __ARCH_WANT_SYS_SIGPROCMASK |
| 315 | # define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND | ||
| 313 | # define __ARCH_WANT_COMPAT_SYS_TIME | 316 | # define __ARCH_WANT_COMPAT_SYS_TIME |
| 314 | #endif | 317 | #endif |
| 315 | 318 | ||
