author		Andi Kleen <andi@firstfloor.org>	2012-05-14 18:00:12 -0400
committer	Tony Luck <tony.luck@intel.com>	2012-05-16 17:09:55 -0400
commit		4035c6db5a9dedf5e79c502bf39389a0b714cb38 (patch)
tree		5f99e4668e7edcd8138908b5a40cf31ce1d24b93 /arch/ia64
parent		98e4cff73a18af27f0d40d0b0d37f105dfc1994a (diff)
[IA64] Liberate the signal layer from IA64 assembler
Currently IA64 has an assembler implementation of rt_sigprocmask. Having a single architecture implement this in assembler is a serious maintenance problem that inhibits further evolution of the signal subsystem: everyone who wants to make deep changes to signals would need to learn that assembler language. Whatever performance improvement IA64 gets from this, it cannot be worth the price in maintainability.

We have some locking problems in the signal code that need to be fixed, but this roadblock needs to be removed first.

So just disable the special assembler IA64 implementation and fall back to a normal syscall there.

Acked-by: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
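For reference, the semantics that the removed fast path implemented, and that the generic rt_sigprocmask syscall path now provides on IA64, look roughly like the C sketch below. This is not the kernel's actual code: the helper name, the single-word mask parameter, and the simplified error handling are illustrative assumptions, and the siglock, user-space access checks, and recalc_sigpending() step are only hinted at in comments.

/*
 * Simplified sketch (not the kernel's sys_rt_sigprocmask) of the semantics
 * of the removed fast path.  Assumes a single-word signal mask
 * (_NSIG_WORDS == 1, as on ia64); locking and user copy-in/copy-out are
 * omitted.  The helper name is hypothetical.
 */
#include <errno.h>
#include <signal.h>
#include <stddef.h>

static long sketch_rt_sigprocmask(int how, const unsigned long *set,
                                  unsigned long *oset, size_t sigsetsize,
                                  unsigned long *blocked)
{
        unsigned long old = *blocked;

        if (sigsetsize != sizeof(unsigned long))  /* the assembler checks _NSIG_WORDS*8 */
                return -EINVAL;

        if (set) {
                /* SIGKILL and SIGSTOP can never be blocked. */
                unsigned long new = *set &
                        ~((1UL << (SIGKILL - 1)) | (1UL << (SIGSTOP - 1)));

                switch (how) {
                case SIG_BLOCK:   *blocked = old | new;  break;
                case SIG_UNBLOCK: *blocked = old & ~new; break;
                case SIG_SETMASK: *blocked = new;        break;
                default:          return -EINVAL;
                }
                /* The real code re-evaluates TIF_SIGPENDING here. */
        }

        if (oset)
                *oset = old;            /* report the previous mask */

        return 0;
}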
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/fsys.S	171
1 file changed, 1 insertion(+), 170 deletions(-)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index e2dfae24c0f4..e662f178b990 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -372,175 +372,6 @@ ENTRY(fsys_clock_gettime)
 END(fsys_clock_gettime)
 
 /*
- * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize).
- */
-#if _NSIG_WORDS != 1
-# error Sorry, fsys_rt_sigprocmask() needs to be updated for _NSIG_WORDS != 1.
-#endif
-ENTRY(fsys_rt_sigprocmask)
-	.prologue
-	.altrp b6
-	.body
-
-	add r2=IA64_TASK_BLOCKED_OFFSET,r16
-	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-	cmp4.ltu p6,p0=SIG_SETMASK,r32
-
-	cmp.ne p15,p0=r0,r34			// oset != NULL?
-	tnat.nz p8,p0=r34
-	add r31=IA64_TASK_SIGHAND_OFFSET,r16
-	;;
-	ld8 r3=[r2]				// read/prefetch current->blocked
-	ld4 r9=[r9]
-	tnat.nz.or p6,p0=r35
-
-	cmp.ne.or p6,p0=_NSIG_WORDS*8,r35
-	tnat.nz.or p6,p0=r32
-(p6)	br.spnt.few .fail_einval		// fail with EINVAL
-	;;
-#ifdef CONFIG_SMP
-	ld8 r31=[r31]				// r31 <- current->sighand
-#endif
-	and r9=TIF_ALLWORK_MASK,r9
-	tnat.nz.or p8,p0=r33
-	;;
-	cmp.ne p7,p0=0,r9
-	cmp.eq p6,p0=r0,r33			// set == NULL?
-	add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31	// r31 <- current->sighand->siglock
-(p8)	br.spnt.few .fail_efault		// fail with EFAULT
-(p7)	br.spnt.many fsys_fallback_syscall	// got pending kernel work...
-(p6)	br.dpnt.many .store_mask		// -> short-circuit to just reading the signal mask
-
-	/* Argh, we actually have to do some work and _update_ the signal mask: */
-
-EX(.fail_efault, probe.r.fault r33, 3)		// verify user has read-access to *set
-EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
-	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
-	;;
-
-	RSM_PSR_I(p0, r18, r19)			// mask interrupt delivery
-	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP
-	mov r8=EINVAL				// default to EINVAL
-
-#ifdef CONFIG_SMP
-	// __ticket_spin_trylock(r31)
-	ld4 r17=[r31]
-	;;
-	mov.m ar.ccv=r17
-	extr.u r9=r17,17,15
-	adds r19=1,r17
-	extr.u r18=r17,0,15
-	;;
-	cmp.eq p6,p7=r9,r18
-	;;
-(p6)	cmpxchg4.acq r9=[r31],r19,ar.ccv
-(p6)	dep.z r20=r19,1,15			// next serving ticket for unlock
-(p7)	br.cond.spnt.many .lock_contention
-	;;
-	cmp4.eq p0,p7=r9,r17
-	adds r31=2,r31
-(p7)	br.cond.spnt.many .lock_contention
-	ld8 r3=[r2]				// re-read current->blocked now that we hold the lock
-	;;
-#else
-	ld8 r3=[r2]				// re-read current->blocked now that we hold the lock
-#endif
-	add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
-	add r19=IA64_TASK_SIGNAL_OFFSET,r16
-	cmp4.eq p6,p0=SIG_BLOCK,r32
-	;;
-	ld8 r19=[r19]				// r19 <- current->signal
-	cmp4.eq p7,p0=SIG_UNBLOCK,r32
-	cmp4.eq p8,p0=SIG_SETMASK,r32
-	;;
-	ld8 r18=[r18]				// r18 <- current->pending.signal
-	.pred.rel.mutex p6,p7,p8
-(p6)	or r14=r3,r14				// SIG_BLOCK
-(p7)	andcm r14=r3,r14			// SIG_UNBLOCK
-
-(p8)	mov r14=r14				// SIG_SETMASK
-(p6)	mov r8=0				// clear error code
-	// recalc_sigpending()
-	add r17=IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,r19
-
-	add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r19
-	;;
-	ld4 r17=[r17]				// r17 <- current->signal->group_stop_count
-(p7)	mov r8=0				// clear error code
-
-	ld8 r19=[r19]				// r19 <- current->signal->shared_pending
-	;;
-	cmp4.gt p6,p7=r17,r0			// p6/p7 <- (current->signal->group_stop_count > 0)?
-(p8)	mov r8=0				// clear error code
-
-	or r18=r18,r19				// r18 <- current->pending | current->signal->shared_pending
-	;;
-	// r18 <- (current->pending | current->signal->shared_pending) & ~current->blocked:
-	andcm r18=r18,r14
-	add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-	;;
-
-(p7)	cmp.ne.or.andcm p6,p7=r18,r0		// p6/p7 <- signal pending
-	mov r19=0				// i must not leak kernel bits...
-(p6)	br.cond.dpnt.many .sig_pending
-	;;
-
-1:	ld4 r17=[r9]				// r17 <- current->thread_info->flags
-	;;
-	mov ar.ccv=r17
-	and r18=~_TIF_SIGPENDING,r17		// r18 <- r17 & ~(1 << TIF_SIGPENDING)
-	;;
-
-	st8 [r2]=r14				// update current->blocked with new mask
-	cmpxchg4.acq r8=[r9],r18,ar.ccv		// current->thread_info->flags <- r18
-	;;
-	cmp.ne p6,p0=r17,r8			// update failed?
-(p6)	br.cond.spnt.few 1b			// yes -> retry
-
-#ifdef CONFIG_SMP
-	// __ticket_spin_unlock(r31)
-	st2.rel [r31]=r20
-	mov r20=0				// i must not leak kernel bits...
-#endif
-	SSM_PSR_I(p0, p9, r31)
-	;;
-
-	srlz.d					// ensure psr.i is set again
-	mov r18=0				// i must not leak kernel bits...
-
-.store_mask:
-EX(.fail_efault, (p15) probe.w.fault r34, 3)	// verify user has write-access to *oset
-EX(.fail_efault, (p15) st8 [r34]=r3)
-	mov r2=0				// i must not leak kernel bits...
-	mov r3=0				// i must not leak kernel bits...
-	mov r8=0				// return 0
-	mov r9=0				// i must not leak kernel bits...
-	mov r14=0				// i must not leak kernel bits...
-	mov r17=0				// i must not leak kernel bits...
-	mov r31=0				// i must not leak kernel bits...
-	FSYS_RETURN
-
-.sig_pending:
-#ifdef CONFIG_SMP
-	// __ticket_spin_unlock(r31)
-	st2.rel [r31]=r20			// release the lock
-#endif
-	SSM_PSR_I(p0, p9, r17)
-	;;
-	srlz.d
-	br.sptk.many fsys_fallback_syscall	// with signal pending, do the heavy-weight syscall
-
-#ifdef CONFIG_SMP
-.lock_contention:
-	/* Rather than spinning here, fall back on doing a heavy-weight syscall. */
-	SSM_PSR_I(p0, p9, r17)
-	;;
-	srlz.d
-	br.sptk.many fsys_fallback_syscall
-#endif
-END(fsys_rt_sigprocmask)
-
-/*
  * fsys_getcpu doesn't use the third parameter in this implementation. It reads
  * current_thread_info()->cpu and corresponding node in cpu_to_node_map.
  */
@@ -920,7 +751,7 @@ paravirt_fsyscall_table:
 	data8 0				// sigaltstack
 	data8 0				// rt_sigaction
 	data8 0				// rt_sigpending
-	data8 fsys_rt_sigprocmask	// rt_sigprocmask
+	data8 0				// rt_sigprocmask
 	data8 0				// rt_sigqueueinfo	// 1180
 	data8 0				// rt_sigreturn
 	data8 0				// rt_sigsuspend
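The second hunk is what makes the fallback happen: a zero entry in the fsyscall table means there is no light-weight handler for that syscall, so the call takes the normal heavy-weight syscall path. Conceptually the dispatch works like the sketch below; the real dispatch is done in assembly in the low-level syscall entry code, not in C, and both function names here are hypothetical.

/*
 * Hypothetical C rendering of the table-driven idea only; the kernel
 * performs this dispatch in assembly.
 */
typedef long (*fsys_handler_t)(void);

extern long normal_syscall_path(int nr);        /* hypothetical stand-in */

static long fsys_dispatch_sketch(fsys_handler_t *table, int nr)
{
        fsys_handler_t fast = table[nr];

        if (fast)                       /* non-zero entry: light-weight handler */
                return fast();
        return normal_syscall_path(nr); /* zero entry: fall back, as rt_sigprocmask now does */
}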