Diffstat (limited to 'arch/alpha/kernel')

 arch/alpha/kernel/entry.S      |  20
 arch/alpha/kernel/perf_event.c | 128
 arch/alpha/kernel/process.c    |   2
 arch/alpha/kernel/signal.c     |  56
 arch/alpha/kernel/systbls.S    |   2
 5 files changed, 110 insertions(+), 98 deletions(-)
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index ab1ee0ab082b..6d159cee5f2f 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
 	ldq	$20, HAE_REG($19);	\
 	stq	$21, HAE_CACHE($19);	\
 	stq	$21, 0($20);		\
-	ldq	$0, 0($sp);		\
-	ldq	$1, 8($sp);		\
 99:;					\
 	ldq	$19, 72($sp);		\
 	ldq	$20, 80($sp);		\
@@ -316,7 +314,7 @@ ret_from_sys_call:
 	cmovne	$26, 0, $19		/* $19 = 0 => non-restartable */
 	ldq	$0, SP_OFF($sp)
 	and	$0, 8, $0
-	beq	$0, restore_all
+	beq	$0, ret_to_kernel
 ret_to_user:
 	/* Make sure need_resched and sigpending don't change between
 	   sampling and the rti. */
@@ -329,6 +327,11 @@ restore_all:
 	RESTORE_ALL
 	call_pal PAL_rti
 
+ret_to_kernel:
+	lda	$16, 7
+	call_pal PAL_swpipl
+	br	restore_all
+
 	.align 3
 $syscall_error:
 	/*
@@ -657,7 +660,7 @@ kernel_thread:
 	/* We don't actually care for a3 success widgetry in the kernel.
 	   Not for positive errno values. */
 	stq	$0, 0($sp)		/* $0 */
-	br	restore_all
+	br	ret_to_kernel
 	.end kernel_thread
 
 /*
@@ -912,15 +915,6 @@ sys_execve:
 	.end sys_execve
 
 	.align	4
-	.globl	osf_sigprocmask
-	.ent	osf_sigprocmask
-osf_sigprocmask:
-	.prologue 0
-	mov	$sp, $18
-	jmp	$31, sys_osf_sigprocmask
-	.end	osf_sigprocmask
-
-	.align	4
 	.globl	alpha_ni_syscall
 	.ent	alpha_ni_syscall
 alpha_ni_syscall:
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f58c83..1cc49683fb69 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count  - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
+	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
 	/* It is possible on very rare occasions that the PMC has overflowed
 	 * but the interrupt is yet to come.  Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
 		struct hw_perf_event *hwc = &pe->hw;
 		int idx = hwc->idx;
 
-		if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-			continue;
+		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+			alpha_perf_event_set_period(pe, hwc, idx);
+			cpuc->current_idx[j] = idx;
 		}
 
-		alpha_perf_event_set_period(pe, hwc, idx);
-		cpuc->current_idx[j] = idx;
-		cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+		if (!(hwc->state & PERF_HES_STOPPED))
+			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
 	}
 	cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	int n0;
 	int ret;
-	unsigned long flags;
+	unsigned long irq_flags;
 
 	/*
 	 * The Sparc code has the IRQ disable first followed by the perf
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
 	 * nevertheless we disable the PMCs first to enable a potential
 	 * final PMI to occur before we disable interrupts.
 	 */
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	/* Default to error to be returned */
 	ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	hwc->state = PERF_HES_UPTODATE;
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_STOPPED;
+
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 
 	return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  * returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned long flags;
+	unsigned long irq_flags;
 	int j;
 
-	perf_disable();
-	local_irq_save(flags);
+	perf_pmu_disable(event->pmu);
+	local_irq_save(irq_flags);
 
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
 		}
 	}
 
-	local_irq_restore(flags);
-	perf_enable();
+	local_irq_restore(irq_flags);
+	perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		cpuc->idx_mask &= ~(1UL<<hwc->idx);
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		alpha_perf_event_update(event, hwc, hwc->idx, 0);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+		alpha_perf_event_set_period(event, hwc, hwc->idx);
+	}
+
+	hwc->state = 0;
+
 	cpuc->idx_mask |= 1UL<<hwc->idx;
-	wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+	if (cpuc->enabled)
+		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
 	return 0;
 }
 
-static const struct pmu pmu = {
-	.enable		= alpha_pmu_enable,
-	.disable	= alpha_pmu_disable,
-	.read		= alpha_pmu_read,
-	.unthrottle	= alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
 	int err;
 
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (!alpha_pmu)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	/* Do the real initialisation work. */
 	err = __hw_perf_event_init(event);
 
-	if (err)
-		return ERR_PTR(err);
-
-	return &pmu;
+	return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= alpha_pmu_enable,
+	.pmu_disable	= alpha_pmu_disable,
+	.event_init	= alpha_pmu_event_init,
+	.add		= alpha_pmu_add,
+	.del		= alpha_pmu_del,
+	.start		= alpha_pmu_start,
+	.stop		= alpha_pmu_stop,
+	.read		= alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
-			cpuc->idx_mask &= ~(1UL<<idx);
+			alpha_pmu_stop(event, 0);
 		}
 	}
 	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
+
+	perf_pmu_register(&pmu);
 }
 
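The perf_event.c hunks above move Alpha from the old per-event enable/disable/unthrottle hooks to the struct pmu add/del/start/stop interface, with hwc->state recording whether a counter is stopped and whether event->count is up to date. The following is a small, self-contained userspace sketch of that PERF_HES_*/PERF_EF_* bookkeeping; it is commentary on the patch, not part of it, and the flag values, helper names and the fake counter are illustrative stand-ins rather than the kernel's definitions.

/*
 * Sketch of the add/start/stop state protocol the patch implements.
 * Build with any C compiler, e.g.: cc -o pmu-sketch pmu-sketch.c
 */
#include <assert.h>
#include <stdio.h>

#define PERF_EF_START   0x1	/* add() should leave the event counting   */
#define PERF_EF_RELOAD  0x2	/* start() should reprogram the period     */
#define PERF_EF_UPDATE  0x4	/* stop() should fold the hardware count   */

#define PERF_HES_STOPPED   0x1	/* counter is not counting                 */
#define PERF_HES_UPTODATE  0x2	/* accumulated count reflects the hardware */

struct fake_event {
	int state;		/* mirrors hwc->state in the patch          */
	long count;		/* accumulated value, like event->count     */
	long hw;		/* stand-in for the hardware PMC            */
};

static int fake_add(struct fake_event *e, int flags)
{
	/* as alpha_pmu_add(): up to date, and stopped unless EF_START */
	e->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		e->state |= PERF_HES_STOPPED;
	return 0;
}

static void fake_start(struct fake_event *e, int flags)
{
	/* as alpha_pmu_start(): must be stopped; RELOAD implies up to date */
	assert(e->state & PERF_HES_STOPPED);
	if (flags & PERF_EF_RELOAD)
		assert(e->state & PERF_HES_UPTODATE);
	e->state = 0;			/* counting again, count now stale */
}

static void fake_stop(struct fake_event *e, int flags)
{
	/* as alpha_pmu_stop(): mark stopped, optionally fold the count */
	if (!(e->state & PERF_HES_STOPPED))
		e->state |= PERF_HES_STOPPED;
	if ((flags & PERF_EF_UPDATE) && !(e->state & PERF_HES_UPTODATE)) {
		e->count += e->hw;	/* alpha_perf_event_update() analogue */
		e->hw = 0;
		e->state |= PERF_HES_UPTODATE;
	}
}

int main(void)
{
	struct fake_event ev = { 0 };

	fake_add(&ev, 0);		  /* scheduled in but left stopped      */
	fake_start(&ev, PERF_EF_RELOAD);  /* core starts it: state cleared     */
	ev.hw = 1234;			  /* pretend the PMC counted 1234      */
	fake_stop(&ev, PERF_EF_UPDATE);	  /* stop and fold the hardware count  */
	printf("count=%ld state=%#x\n", ev.count, ev.state);
	return 0;
}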
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba308eab..3ec35066f1dc 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
 	dest[27] = pt->r27;
 	dest[28] = pt->r28;
 	dest[29] = pt->gp;
-	dest[30] = rdusp();
+	dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
 	dest[31] = pt->pc;
 
 	/* Once upon a time this was the PS value.  Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0f6b51ae865a..6f7feb5db271 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-		struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-	unsigned long oldmask = -EINVAL;
-
-	if ((unsigned long)how-1 <= 2) {
-		long sign = how-2;	/* -1 .. 1 */
-		unsigned long block, unblock;
-
-		newmask &= _BLOCKABLE;
-		spin_lock_irq(&current->sighand->siglock);
-		oldmask = current->blocked.sig[0];
-
-		unblock = oldmask & ~newmask;
-		block = oldmask | newmask;
-		if (!sign)
-			block = unblock;
-		if (sign <= 0)
-			newmask = block;
-		if (_NSIG_WORDS > 1 && sign > 0)
-			sigemptyset(&current->blocked);
-		current->blocked.sig[0] = newmask;
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-
-		regs->r0 = 0;		/* special no error return */
+	sigset_t oldmask;
+	sigset_t mask;
+	unsigned long res;
+
+	siginitset(&mask, newmask & _BLOCKABLE);
+	res = sigprocmask(how, &mask, &oldmask);
+	if (!res) {
+		force_successful_syscall_return();
+		res = oldmask.sig[0];
 	}
-	return oldmask;
+	return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
 		old_sigset_t mask;
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 		new_ka.ka_restorer = NULL;
 	}
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
 	if (!ret && oact) {
 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
 
 	return ret;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index ce594ef533cc..a6a1de9db16f 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
	.quad sys_open				/* 45 */
	.quad alpha_ni_syscall
	.quad sys_getxgid
-	.quad osf_sigprocmask
+	.quad sys_osf_sigprocmask
	.quad alpha_ni_syscall
	.quad alpha_ni_syscall			/* 50 */
	.quad sys_acct
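Taken together, the entry.S, signal.c and systbls.S hunks retire the osf_sigprocmask assembly trampoline (which smuggled the pt_regs pointer in as a hidden third argument so the handler could write regs->r0 directly) and reimplement the syscall on top of sigprocmask(), using force_successful_syscall_return() to preserve the "old mask is the return value" convention. Below is a userspace model of that OSF/1 convention, assuming the 1/2/3 "how" encoding noted in the removed comment; it is commentary only, and the osf_sigprocmask() helper and OSF_SIG_* constants here are illustrative, not kernel API.

/*
 * Userspace sketch of OSF/1 sigprocmask() semantics on top of POSIX
 * sigprocmask(): on success the previous mask word is returned.
 */
#include <signal.h>
#include <stdio.h>

#define OSF_SIG_BLOCK   1
#define OSF_SIG_UNBLOCK 2
#define OSF_SIG_SETMASK 3

/* Map the OSF "how" onto POSIX and return the previous mask word. */
static long osf_sigprocmask(int how, unsigned long newmask)
{
	static const int posix_how[] = { SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK };
	sigset_t mask, oldmask;
	unsigned long res = 0;
	int i;

	if (how < OSF_SIG_BLOCK || how > OSF_SIG_SETMASK)
		return -1;		/* the kernel returns -EINVAL here */

	sigemptyset(&mask);
	for (i = 1; i < 32; i++)	/* expand the word, as siginitset() would */
		if (newmask & (1UL << (i - 1)))
			sigaddset(&mask, i);

	if (sigprocmask(posix_how[how - 1], &mask, &oldmask))
		return -1;

	for (i = 1; i < 32; i++)	/* repack the old mask into one word */
		if (sigismember(&oldmask, i))
			res |= 1UL << (i - 1);
	return (long)res;
}

int main(void)
{
	long before = osf_sigprocmask(OSF_SIG_BLOCK, 1UL << (SIGUSR1 - 1));
	long after  = osf_sigprocmask(OSF_SIG_SETMASK, 0);

	printf("old mask before: %#lx, after blocking SIGUSR1: %#lx\n",
	       before, after);
	return 0;
}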