author     Robert Richter <robert.richter@amd.com>  2010-10-15 06:45:00 -0400
committer  Robert Richter <robert.richter@amd.com>  2010-10-15 06:45:00 -0400
commit     6268464b370e234e0255330190f9bd5d19386ad7
tree       5742641092ce64227dd2086d78baaede57da1f80 /arch
parent     7df01d96b295e400167e78061b81d4c91630b12d
parent     0fdf13606b67f830559abdaad15980c7f4f05ec4
Merge remote branch 'tip/perf/core' into oprofile/core
Conflicts:
arch/arm/oprofile/common.c
kernel/perf_event.c
Diffstat (limited to 'arch')
138 files changed, 1960 insertions, 1203 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index fe48fc7a3eba..53d7f619a1b9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -158,4 +158,7 @@ config HAVE_PERF_EVENTS_NMI
       subsystem. Also has support for calculating CPU cycle events
       to determine how many clock cycles in a given period.
 
+config HAVE_ARCH_JUMP_LABEL
+    bool
+
 source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index ab1ee0ab082b..6d159cee5f2f 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
     ldq $20, HAE_REG($19); \
     stq $21, HAE_CACHE($19); \
     stq $21, 0($20); \
-    ldq $0, 0($sp); \
-    ldq $1, 8($sp); \
 99:; \
     ldq $19, 72($sp); \
     ldq $20, 80($sp); \
@@ -316,7 +314,7 @@ ret_from_sys_call:
     cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
     ldq $0, SP_OFF($sp)
     and $0, 8, $0
-    beq $0, restore_all
+    beq $0, ret_to_kernel
 ret_to_user:
     /* Make sure need_resched and sigpending don't change between
        sampling and the rti. */
@@ -329,6 +327,11 @@ restore_all:
     RESTORE_ALL
     call_pal PAL_rti
 
+ret_to_kernel:
+    lda $16, 7
+    call_pal PAL_swpipl
+    br restore_all
+
     .align 3
$syscall_error:
     /*
@@ -657,7 +660,7 @@ kernel_thread:
     /* We don't actually care for a3 success widgetry in the kernel.
        Not for positive errno values. */
     stq $0, 0($sp) /* $0 */
-    br restore_all
+    br ret_to_kernel
     .end kernel_thread
 
 /*
@@ -912,15 +915,6 @@ sys_execve:
     .end sys_execve
 
     .align 4
-    .globl osf_sigprocmask
-    .ent osf_sigprocmask
-osf_sigprocmask:
-    .prologue 0
-    mov $sp, $18
-    jmp $31, sys_osf_sigprocmask
-    .end osf_sigprocmask
-
-    .align 4
     .globl alpha_ni_syscall
     .ent alpha_ni_syscall
 alpha_ni_syscall:
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 85d8e4f58c83..1cc49683fb69 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -307,7 +307,7 @@ again:
             new_raw_count) != prev_raw_count)
         goto again;
 
     delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
 
     /* It is possible on very rare occasions that the PMC has overflowed
      * but the interrupt is yet to come. Detect and fix this situation.
@@ -402,14 +402,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
         struct hw_perf_event *hwc = &pe->hw;
         int idx = hwc->idx;
 
-        if (cpuc->current_idx[j] != PMC_NO_INDEX) {
-            cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
-            continue;
+        if (cpuc->current_idx[j] == PMC_NO_INDEX) {
+            alpha_perf_event_set_period(pe, hwc, idx);
+            cpuc->current_idx[j] = idx;
         }
 
-        alpha_perf_event_set_period(pe, hwc, idx);
-        cpuc->current_idx[j] = idx;
-        cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
+        if (!(hwc->state & PERF_HES_STOPPED))
+            cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
     }
     cpuc->config = cpuc->event[0]->hw.config_base;
 }
@@ -420,12 +419,13 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static int alpha_pmu_enable(struct perf_event *event)
+static int alpha_pmu_add(struct perf_event *event, int flags)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+    struct hw_perf_event *hwc = &event->hw;
     int n0;
     int ret;
-    unsigned long flags;
+    unsigned long irq_flags;
 
@@ -435,8 +435,8 @@ static int alpha_pmu_enable(struct perf_event *event)
      * nevertheless we disable the PMCs first to enable a potential
      * final PMI to occur before we disable interrupts.
      */
-    perf_disable();
-    local_irq_save(flags);
+    perf_pmu_disable(event->pmu);
+    local_irq_save(irq_flags);
 
     /* Default to error to be returned */
     ret = -EAGAIN;
@@ -455,8 +455,12 @@ static int alpha_pmu_enable(struct perf_event *event)
         }
     }
 
-    local_irq_restore(flags);
-    perf_enable();
+    hwc->state = PERF_HES_UPTODATE;
+    if (!(flags & PERF_EF_START))
+        hwc->state |= PERF_HES_STOPPED;
+
+    local_irq_restore(irq_flags);
+    perf_pmu_enable(event->pmu);
 
     return ret;
 }
@@ -467,15 +471,15 @@ static int alpha_pmu_enable(struct perf_event *event)
  * - this function is called from outside this module via the pmu struct
  *   returned from perf event initialisation.
  */
-static void alpha_pmu_disable(struct perf_event *event)
+static void alpha_pmu_del(struct perf_event *event, int flags)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
     struct hw_perf_event *hwc = &event->hw;
-    unsigned long flags;
+    unsigned long irq_flags;
     int j;
 
-    perf_disable();
-    local_irq_save(flags);
+    perf_pmu_disable(event->pmu);
+    local_irq_save(irq_flags);
 
     for (j = 0; j < cpuc->n_events; j++) {
         if (event == cpuc->event[j]) {
@@ -501,8 +505,8 @@ static void alpha_pmu_disable(struct perf_event *event)
         }
     }
 
-    local_irq_restore(flags);
-    perf_enable();
+    local_irq_restore(irq_flags);
+    perf_pmu_enable(event->pmu);
 }
 
 
@@ -514,13 +518,44 @@ static void alpha_pmu_read(struct perf_event *event)
 }
 
 
-static void alpha_pmu_unthrottle(struct perf_event *event)
+static void alpha_pmu_stop(struct perf_event *event, int flags)
+{
+    struct hw_perf_event *hwc = &event->hw;
+    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+    if (!(hwc->state & PERF_HES_STOPPED)) {
+        cpuc->idx_mask &= ~(1UL<<hwc->idx);
+        hwc->state |= PERF_HES_STOPPED;
+    }
+
+    if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+        alpha_perf_event_update(event, hwc, hwc->idx, 0);
+        hwc->state |= PERF_HES_UPTODATE;
+    }
+
+    if (cpuc->enabled)
+        wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
+}
+
+
+static void alpha_pmu_start(struct perf_event *event, int flags)
 {
     struct hw_perf_event *hwc = &event->hw;
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+    if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+        return;
+
+    if (flags & PERF_EF_RELOAD) {
+        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+        alpha_perf_event_set_period(event, hwc, hwc->idx);
+    }
+
+    hwc->state = 0;
+
     cpuc->idx_mask |= 1UL<<hwc->idx;
-    wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
+    if (cpuc->enabled)
+        wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
 }
 
 
@@ -642,39 +677,36 @@ static int __hw_perf_event_init(struct perf_event *event)
     return 0;
 }
 
-static const struct pmu pmu = {
-    .enable = alpha_pmu_enable,
-    .disable = alpha_pmu_disable,
-    .read = alpha_pmu_read,
-    .unthrottle = alpha_pmu_unthrottle,
-};
-
-
 /*
  * Main entry point to initialise a HW performance event.
  */
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int alpha_pmu_event_init(struct perf_event *event)
 {
     int err;
 
+    switch (event->attr.type) {
+    case PERF_TYPE_RAW:
+    case PERF_TYPE_HARDWARE:
+    case PERF_TYPE_HW_CACHE:
+        break;
+
+    default:
+        return -ENOENT;
+    }
+
     if (!alpha_pmu)
-        return ERR_PTR(-ENODEV);
+        return -ENODEV;
 
     /* Do the real initialisation work. */
     err = __hw_perf_event_init(event);
 
-    if (err)
-        return ERR_PTR(err);
-
-    return &pmu;
+    return err;
 }
 
-
-
 /*
  * Main entry point - enable HW performance counters.
  */
-void hw_perf_enable(void)
+static void alpha_pmu_enable(struct pmu *pmu)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -700,7 +732,7 @@ void hw_perf_enable(void)
  * Main entry point - disable HW performance counters.
  */
 
-void hw_perf_disable(void)
+static void alpha_pmu_disable(struct pmu *pmu)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -713,6 +745,17 @@ void hw_perf_disable(void)
     wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 }
 
+static struct pmu pmu = {
+    .pmu_enable = alpha_pmu_enable,
+    .pmu_disable = alpha_pmu_disable,
+    .event_init = alpha_pmu_event_init,
+    .add = alpha_pmu_add,
+    .del = alpha_pmu_del,
+    .start = alpha_pmu_start,
+    .stop = alpha_pmu_stop,
+    .read = alpha_pmu_read,
+};
+
 
 /*
  * Main entry point - don't know when this is called but it
@@ -766,7 +809,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
     wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
     /* la_ptr is the counter that overflowed. */
-    if (unlikely(la_ptr >= perf_max_events)) {
+    if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
         /* This should never occur! */
         irq_err_count++;
         pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -807,7 +850,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
             /* Interrupts coming too quickly; "throttle" the
              * counter, i.e., disable it for a little while.
              */
-            cpuc->idx_mask &= ~(1UL<<idx);
+            alpha_pmu_stop(event, 0);
         }
     }
     wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
@@ -837,6 +880,7 @@ void __init init_hw_perf_events(void)
 
     /* And set up PMU specification */
     alpha_pmu = &ev67_pmu;
-    perf_max_events = alpha_pmu->num_pmcs;
+
+    perf_pmu_register(&pmu);
 }
 
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba308eab..3ec35066f1dc 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
     dest[27] = pt->r27;
     dest[28] = pt->r28;
     dest[29] = pt->gp;
-    dest[30] = rdusp();
+    dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
     dest[31] = pt->pc;
 
     /* Once upon a time this was the PS value. Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0f6b51ae865a..6f7feb5db271 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
 /*
  * The OSF/1 sigprocmask calling sequence is different from the
  * C sigprocmask() sequence..
- *
- * how:
- * 1 - SIG_BLOCK
- * 2 - SIG_UNBLOCK
- * 3 - SIG_SETMASK
- *
- * We change the range to -1 .. 1 in order to let gcc easily
- * use the conditional move instructions.
- *
- * Note that we don't need to acquire the kernel lock for SMP
- * operation, as all of this is local to this thread.
  */
-SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
-        struct pt_regs *, regs)
+SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
 {
-    unsigned long oldmask = -EINVAL;
-
-    if ((unsigned long)how-1 <= 2) {
-        long sign = how-2; /* -1 .. 1 */
-        unsigned long block, unblock;
-
-        newmask &= _BLOCKABLE;
-        spin_lock_irq(&current->sighand->siglock);
-        oldmask = current->blocked.sig[0];
-
-        unblock = oldmask & ~newmask;
-        block = oldmask | newmask;
-        if (!sign)
-            block = unblock;
-        if (sign <= 0)
-            newmask = block;
-        if (_NSIG_WORDS > 1 && sign > 0)
-            sigemptyset(&current->blocked);
-        current->blocked.sig[0] = newmask;
-        recalc_sigpending();
-        spin_unlock_irq(&current->sighand->siglock);
-
-        regs->r0 = 0; /* special no error return */
+    sigset_t oldmask;
+    sigset_t mask;
+    unsigned long res;
+
+    siginitset(&mask, newmask & _BLOCKABLE);
+    res = sigprocmask(how, &mask, &oldmask);
+    if (!res) {
+        force_successful_syscall_return();
+        res = oldmask.sig[0];
     }
-    return oldmask;
+    return res;
 }
 
 SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
         old_sigset_t mask;
         if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
             __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-            __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+            __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+            __get_user(mask, &act->sa_mask))
             return -EFAULT;
-        __get_user(mask, &act->sa_mask);
         siginitset(&new_ka.sa.sa_mask, mask);
         new_ka.ka_restorer = NULL;
     }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
     if (!ret && oact) {
         if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
             __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-            __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+            __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+            __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
             return -EFAULT;
-        __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
     }
 
     return ret;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index ce594ef533cc..a6a1de9db16f 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
     .quad sys_open /* 45 */
     .quad alpha_ni_syscall
     .quad sys_getxgid
-    .quad osf_sigprocmask
+    .quad sys_osf_sigprocmask
     .quad alpha_ni_syscall
     .quad alpha_ni_syscall /* 50 */
     .quad sys_acct
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 553b7cf17bfb..88c97bc7a6f5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -271,7 +271,6 @@ config ARCH_AT91
     bool "Atmel AT91"
     select ARCH_REQUIRE_GPIOLIB
     select HAVE_CLK
-    select ARCH_USES_GETTIMEOFFSET
     help
       This enables support for systems based on the Atmel AT91RM9200,
       AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
       ACTLR register. Note that setting specific bits in the ACTLR register
       may not be available in non-secure mode.
 
+config ARM_ERRATA_742230
+    bool "ARM errata: DMB operation may be faulty"
+    depends on CPU_V7 && SMP
+    help
+      This option enables the workaround for the 742230 Cortex-A9
+      (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
+      between two write operations may not ensure the correct visibility
+      ordering of the two writes. This workaround sets a specific bit in
+      the diagnostic register of the Cortex-A9 which causes the DMB
+      instruction to behave as a DSB, ensuring the correct behaviour of
+      the two writes.
+
+config ARM_ERRATA_742231
+    bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
+    depends on CPU_V7 && SMP
+    help
+      This option enables the workaround for the 742231 Cortex-A9
+      (r2p0..r2p2) erratum. Under certain conditions, specific to the
+      Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
+      accessing some data located in the same cache line, may get corrupted
+      data due to bad handling of the address hazard when the line gets
+      replaced from one of the CPUs at the same time as another CPU is
+      accessing it. This workaround sets specific bits in the diagnostic
+      register of the Cortex-A9 which reduces the linefill issuing
+      capabilities of the processor.
+
 config PL310_ERRATA_588369
     bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
     depends on CACHE_L2X0 && ARCH_OMAP4
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index b23f6bc46cfa..65a7c1c588a9 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
     $(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
     @sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 7974baacafce..1bec96e85196 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
         ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+    if (mask >= PHYS_OFFSET + SZ_64M - 1)
+        return 0;
+
+    return -EIO;
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
     it8152_io.start = IT8152_IO_BASE + 0x12000;
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1ef80f..e90b167ea848 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
     __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                unsigned long size, pgprot_t vma_prot);
 #else
 #define pgprot_dmacoherent(prot) \
     __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1b560825e1cf..7885722bdf4e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
     beq no_work_pending
     mov r0, sp          @ 'regs'
     mov r2, why         @ 'syscall'
+    tst r1, #_TIF_SIGPENDING    @ delivering a signal?
+    movne why, #0           @ prevent further restarts
     bl do_notify_resume
     b ret_slow_syscall      @ Check work again
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ef3bc331518f..6cc6521881aa 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -227,46 +227,56 @@ again:
 }
 
 static void
-armpmu_disable(struct perf_event *event)
+armpmu_read(struct perf_event *event)
 {
-    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
     struct hw_perf_event *hwc = &event->hw;
-    int idx = hwc->idx;
-
-    WARN_ON(idx < 0);
-
-    clear_bit(idx, cpuc->active_mask);
-    armpmu->disable(hwc, idx);
-
-    barrier();
 
-    armpmu_event_update(event, hwc, idx);
-    cpuc->events[idx] = NULL;
-    clear_bit(idx, cpuc->used_mask);
+    /* Don't read disabled counters! */
+    if (hwc->idx < 0)
+        return;
 
-    perf_event_update_userpage(event);
+    armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
-armpmu_read(struct perf_event *event)
+armpmu_stop(struct perf_event *event, int flags)
 {
     struct hw_perf_event *hwc = &event->hw;
 
-    /* Don't read disabled counters! */
-    if (hwc->idx < 0)
+    if (!armpmu)
         return;
 
-    armpmu_event_update(event, hwc, hwc->idx);
+    /*
+     * ARM pmu always has to update the counter, so ignore
+     * PERF_EF_UPDATE, see comments in armpmu_start().
+     */
+    if (!(hwc->state & PERF_HES_STOPPED)) {
+        armpmu->disable(hwc, hwc->idx);
+        barrier(); /* why? */
+        armpmu_event_update(event, hwc, hwc->idx);
+        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+    }
 }
 
 static void
-armpmu_unthrottle(struct perf_event *event)
+armpmu_start(struct perf_event *event, int flags)
 {
     struct hw_perf_event *hwc = &event->hw;
 
+    if (!armpmu)
+        return;
+
+    /*
+     * ARM pmu always has to reprogram the period, so ignore
+     * PERF_EF_RELOAD, see the comment below.
+     */
+    if (flags & PERF_EF_RELOAD)
+        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+    hwc->state = 0;
     /*
      * Set the period again. Some counters can't be stopped, so when we
-     * were throttled we simply disabled the IRQ source and the counter
+     * were stopped we simply disabled the IRQ source and the counter
      * may have been left counting. If we don't do this step then we may
      * get an interrupt too soon or *way* too late if the overflow has
      * happened since disabling.
@@ -275,14 +285,33 @@ armpmu_unthrottle(struct perf_event *event)
     armpmu->enable(hwc, hwc->idx);
 }
 
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+    struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+    struct hw_perf_event *hwc = &event->hw;
+    int idx = hwc->idx;
+
+    WARN_ON(idx < 0);
+
+    clear_bit(idx, cpuc->active_mask);
+    armpmu_stop(event, PERF_EF_UPDATE);
+    cpuc->events[idx] = NULL;
+    clear_bit(idx, cpuc->used_mask);
+
+    perf_event_update_userpage(event);
+}
+
 static int
-armpmu_enable(struct perf_event *event)
+armpmu_add(struct perf_event *event, int flags)
 {
     struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
     struct hw_perf_event *hwc = &event->hw;
     int idx;
     int err = 0;
 
+    perf_pmu_disable(event->pmu);
+
     /* If we don't have a space for the counter then finish early. */
     idx = armpmu->get_event_idx(cpuc, hwc);
     if (idx < 0) {
@@ -299,25 +328,19 @@ armpmu_enable(struct perf_event *event)
     cpuc->events[idx] = event;
     set_bit(idx, cpuc->active_mask);
 
-    /* Set the period for the event. */
-    armpmu_event_set_period(event, hwc, idx);
-
-    /* Enable the event. */
-    armpmu->enable(hwc, idx);
+    hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+    if (flags & PERF_EF_START)
+        armpmu_start(event, PERF_EF_RELOAD);
 
     /* Propagate our changes to the userspace mapping. */
     perf_event_update_userpage(event);
 
 out:
+    perf_pmu_enable(event->pmu);
     return err;
 }
 
-static struct pmu pmu = {
-    .enable = armpmu_enable,
-    .disable = armpmu_disable,
-    .unthrottle = armpmu_unthrottle,
-    .read = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
@@ -497,20 +520,29 @@ __hw_perf_event_init(struct perf_event *event)
     return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
     int err = 0;
 
+    switch (event->attr.type) {
+    case PERF_TYPE_RAW:
+    case PERF_TYPE_HARDWARE:
+    case PERF_TYPE_HW_CACHE:
+        break;
+
+    default:
+        return -ENOENT;
+    }
+
     if (!armpmu)
-        return ERR_PTR(-ENODEV);
+        return -ENODEV;
 
     event->destroy = hw_perf_event_destroy;
 
     if (!atomic_inc_not_zero(&active_events)) {
-        if (atomic_read(&active_events) > perf_max_events) {
+        if (atomic_read(&active_events) > armpmu->num_events) {
             atomic_dec(&active_events);
-            return ERR_PTR(-ENOSPC);
+            return -ENOSPC;
         }
 
         mutex_lock(&pmu_reserve_mutex);
@@ -524,17 +556,16 @@ hw_perf_event_init(struct perf_event *event)
     }
 
     if (err)
-        return ERR_PTR(err);
+        return err;
 
     err = __hw_perf_event_init(event);
     if (err)
         hw_perf_event_destroy(event);
 
-    return err ? ERR_PTR(err) : &pmu;
+    return err;
 }
 
-void
-hw_perf_enable(void)
+static void armpmu_enable(struct pmu *pmu)
 {
     /* Enable all of the perf events on hardware. */
     int idx;
@@ -555,13 +586,23 @@ hw_perf_enable(void)
         armpmu->start();
 }
 
-void
-hw_perf_disable(void)
+static void armpmu_disable(struct pmu *pmu)
 {
     if (armpmu)
         armpmu->stop();
 }
 
+static struct pmu pmu = {
+    .pmu_enable = armpmu_enable,
+    .pmu_disable = armpmu_disable,
+    .event_init = armpmu_event_init,
+    .add = armpmu_add,
+    .del = armpmu_del,
+    .start = armpmu_start,
+    .stop = armpmu_stop,
+    .read = armpmu_read,
+};
+
 /*
  * ARMv6 Performance counter handling code.
  *
@@ -2939,14 +2980,12 @@ init_hw_perf_events(void)
         armpmu = &armv6pmu;
         memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
             sizeof(armv6_perf_cache_map));
-        perf_max_events = armv6pmu.num_events;
         break;
     case 0xB020: /* ARM11mpcore */
         armpmu = &armv6mpcore_pmu;
         memcpy(armpmu_perf_cache_map,
             armv6mpcore_perf_cache_map,
             sizeof(armv6mpcore_perf_cache_map));
-        perf_max_events = armv6mpcore_pmu.num_events;
         break;
     case 0xC080: /* Cortex-A8 */
         armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2958,7 +2997,6 @@ init_hw_perf_events(void)
         /* Reset PMNC and read the nb of CNTx counters
            supported */
         armv7pmu.num_events = armv7_reset_read_pmnc();
-        perf_max_events = armv7pmu.num_events;
         break;
     case 0xC090: /* Cortex-A9 */
         armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -2970,7 +3008,6 @@ init_hw_perf_events(void)
         /* Reset PMNC and read the nb of CNTx counters
            supported */
         armv7pmu.num_events = armv7_reset_read_pmnc();
-        perf_max_events = armv7pmu.num_events;
         break;
     }
     /* Intel CPUs [xscale]. */
@@ -2981,13 +3018,11 @@ init_hw_perf_events(void)
             armpmu = &xscale1pmu;
             memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                 sizeof(xscale_perf_cache_map));
-            perf_max_events = xscale1pmu.num_events;
             break;
         case 2:
             armpmu = &xscale2pmu;
             memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
                 sizeof(xscale_perf_cache_map));
-            perf_max_events = xscale2pmu.num_events;
             break;
         }
     }
@@ -2997,9 +3032,10 @@ init_hw_perf_events(void)
             arm_pmu_names[armpmu->id], armpmu->num_events);
     } else {
         pr_info("no hardware support available\n");
-        perf_max_events = -1;
     }
 
+    perf_pmu_register(&pmu);
+
     return 0;
 }
 arch_initcall(init_hw_perf_events);
@@ -3007,13 +3043,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-        u64 ip)
-{
-    if (entry->nr < PERF_MAX_STACK_DEPTH)
-        entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3045,7 +3074,7 @@ user_backtrace(struct frame_tail *tail,
     if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
         return NULL;
 
-    callchain_store(entry, buftail.lr);
+    perf_callchain_store(entry, buftail.lr);
 
     /*
      * Frame pointers should strictly progress back up the stack
@@ -3057,16 +3086,11 @@ user_backtrace(struct frame_tail *tail,
     return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-        struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
     struct frame_tail *tail;
 
-    callchain_store(entry, PERF_CONTEXT_USER);
-
-    if (!user_mode(regs))
-        regs = task_pt_regs(current);
 
     tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3084,56 +3108,18 @@ callchain_trace(struct stackframe *fr,
         void *data)
 {
     struct perf_callchain_entry *entry = data;
-    callchain_store(entry, fr->pc);
+    perf_callchain_store(entry, fr->pc);
     return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-        struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
     struct stackframe fr;
 
-    callchain_store(entry, PERF_CONTEXT_KERNEL);
     fr.fp = regs->ARM_fp;
     fr.sp = regs->ARM_sp;
     fr.lr = regs->ARM_lr;
     fr.pc = regs->ARM_pc;
     walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-        struct perf_callchain_entry *entry)
-{
-    int is_user;
-
-    if (!regs)
-        return;
-
-    is_user = user_mode(regs);
-
-    if (!current || !current->pid)
-        return;
-
-    if (is_user && current->state != TASK_RUNNING)
-        return;
-
-    if (!is_user)
-        perf_callchain_kernel(regs, entry);
-
-    if (current->mm)
-        perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-    struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-    entry->nr = 0;
-    perf_do_callchain(regs, entry);
-    return entry;
-}
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e71ccd5e7d3..1276babf84d5 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
     .sda_is_open_drain = 1,
     .scl_pin = AT91_PIN_PA21,
     .scl_is_open_drain = 1,
-    .udelay = 2, /* ~100 kHz */
+    .udelay = 5, /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
     .sda_is_open_drain = 1,
     .scl_pin = AT91_PIN_PB11,
     .scl_is_open_drain = 1,
-    .udelay = 2, /* ~100 kHz */
+    .udelay = 5, /* ~100 kHz */
 };
 
 static struct platform_device at91sam9g45_twi1_device = {
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b659ff4..9be261beae7d 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
         .virtual = SRAM_VIRT,
         .pfn = __phys_to_pfn(0x00010000),
         .length = SZ_32K,
-        /* MT_MEMORY_NONCACHED requires supersection alignment */
-        .type = MT_DEVICE,
+        .type = MT_MEMORY_NONCACHED,
     },
 };
 
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c643709..7781e35daec3 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
         .virtual = SRAM_VIRT,
         .pfn = __phys_to_pfn(0x00010000),
         .length = SZ_32K,
-        /* MT_MEMORY_NONCACHED requires supersection alignment */
-        .type = MT_DEVICE,
+        .type = MT_MEMORY_NONCACHED,
     },
 };
 
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec315c99a..5e5b0a7831fb 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
         .virtual = SRAM_VIRT,
         .pfn = __phys_to_pfn(0x00008000),
         .length = SZ_16K,
-        /* MT_MEMORY_NONCACHED requires supersection alignment */
-        .type = MT_DEVICE,
+        .type = MT_MEMORY_NONCACHED,
     },
 };
 
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1872ac..26e8a9c7f50b 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
         .virtual = SRAM_VIRT,
         .pfn = __phys_to_pfn(0x00010000),
         .length = SZ_32K,
-        /* MT_MEMORY_NONCACHED requires supersection alignment */
-        .type = MT_DEVICE,
+        .type = MT_MEMORY_NONCACHED,
     },
 };
 
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e4721ce2e..eb4936ff90ad 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
 
 #define IO_SPACE_LIMIT 0xffffffff
 
-#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
+#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
         DOVE_PCIE0_IO_VIRT_BASE))
 #define __mem_pci(a) (a)
 
 #endif
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 61cd4d64b985..24498a932ba6 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
     return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
 }
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+    if (mask >= SZ_64M - 1)
+        return 0;
+
+    return -EIO;
+}
+
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
 
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f91ca6d4fbe8..8138371c406e 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -26,6 +26,8 @@
 #define PCIBIOS_MAX_MEM 0x4BFFFFFF
 #endif
 
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
+
 #define pcibios_assign_all_busses() 1
 
 /* Register locations and bits */
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec95e76..6e924b398919 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
 
 #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
-#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000
+#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
 
 #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00836b7..513ad3102d7c 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
      * IORESOURCE_IO
      */
     pp->res[0].name = "PCIe 0 I/O Space";
-    pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
+    pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
     pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
     pp->res[0].flags = IORESOURCE_IO;
 
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
      * IORESOURCE_IO
      */
     pp->res[0].name = "PCIe 1 I/O Space";
-    pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
+    pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
     pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
     pp->res[0].flags = IORESOURCE_IO;
 
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0ce6cf..1a8a25edb1b4 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
 #ifndef __ASM_MACH_SYSTEM_H
 #define __ASM_MACH_SYSTEM_H
 
+#include <mach/cputype.h>
+
 static inline void arch_idle(void)
 {
     cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode, const char *cmd)
 {
-    cpu_reset(0);
+    if (cpu_is_pxa168())
+        cpu_reset(0xffff0000);
+    else
+        cpu_reset(0);
 }
 #endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 50d5939a78f1..58093d9e07be 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
     freqs.cpu = policy->cpu;
 
     if (freq_debug)
-        pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
-            "(SDRAM %d Mhz)\n",
+        pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
             freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
             (new_freq_mem / 2000) : (new_freq_mem / 1000));
 
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 7f64d24cd564..814f1458a06a 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,23 +264,35 @@
  * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
  * == 0x3 for pxa300/pxa310/pxa320
  */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
 #define __cpu_is_pxa2xx(id) \
     ({ \
         unsigned int _id = (id) >> 13 & 0x7; \
         _id <= 0x2; \
     })
+#else
+#define __cpu_is_pxa2xx(id) (0)
+#endif
 
+#ifdef CONFIG_PXA3xx
 #define __cpu_is_pxa3xx(id) \
     ({ \
         unsigned int _id = (id) >> 13 & 0x7; \
         _id == 0x3; \
     })
+#else
+#define __cpu_is_pxa3xx(id) (0)
+#endif
 
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
 #define __cpu_is_pxa93x(id) \
     ({ \
         unsigned int _id = (id) >> 4 & 0xfff; \
        _id == 0x683 || _id == 0x693; \
     })
+#else
+#define __cpu_is_pxa93x(id) (0)
+#endif
 
 #define cpu_is_pxa2xx() \
     ({ \
@@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
 #define PCIBIOS_MIN_IO 0
 #define PCIBIOS_MIN_MEM 0
 #define pcibios_assign_all_busses() 1
+#define ARCH_HAS_DMA_SET_COHERENT_MASK
 #endif
 
-
 #endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
index 262691fb97d8..fdca3be47d9b 100644
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ b/arch/arm/mach-pxa/include/mach/io.h
@@ -6,6 +6,8 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
+#include <mach/hardware.h>
+
 #define IO_SPACE_LIMIT 0xffffffff
 
 /*
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 77ad6d34ab5b..405b92a29793 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c | |||
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = { | |||
469 | }, | 469 | }, |
470 | }; | 470 | }; |
471 | 471 | ||
472 | static struct i2c_pxa_platform_data palm27x_i2c_power_info = { | ||
473 | .use_pio = 1, | ||
474 | }; | ||
475 | |||
472 | void __init palm27x_pmic_init(void) | 476 | void __init palm27x_pmic_init(void) |
473 | { | 477 | { |
474 | i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); | 478 | i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); |
475 | pxa27x_set_i2c_power_info(NULL); | 479 | pxa27x_set_i2c_power_info(&palm27x_i2c_power_info); |
476 | } | 480 | } |
477 | #endif | 481 | #endif |
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index c9b747cedea8..37d6173bbb66 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c | |||
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {} | |||
240 | #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) | 240 | #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) |
241 | static struct pxamci_platform_data vpac270_mci_platform_data = { | 241 | static struct pxamci_platform_data vpac270_mci_platform_data = { |
242 | .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, | 242 | .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, |
243 | .gpio_power = -1, | ||
243 | .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, | 244 | .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, |
244 | .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, | 245 | .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, |
245 | .detect_delay_ms = 200, | 246 | .detect_delay_ms = 200, |
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h index 7b1fc984abb6..d5a71abcbaea 100644 --- a/arch/arm/mach-u300/include/mach/gpio.h +++ b/arch/arm/mach-u300/include/mach/gpio.h | |||
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value); | |||
273 | extern int gpio_get_value(unsigned gpio); | 273 | extern int gpio_get_value(unsigned gpio); |
274 | extern void gpio_set_value(unsigned gpio, int value); | 274 | extern void gpio_set_value(unsigned gpio, int value); |
275 | 275 | ||
276 | #define gpio_get_value_cansleep gpio_get_value | ||
277 | #define gpio_set_value_cansleep gpio_set_value | ||
278 | |||
276 | /* wrappers to sleep-enable the previous two functions */ | 279 | /* wrappers to sleep-enable the previous two functions */ |
277 | static inline unsigned gpio_to_irq(unsigned gpio) | 280 | static inline unsigned gpio_to_irq(unsigned gpio) |
278 | { | 281 | { |
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 577df6cccb08..efb127022d42 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void) | |||
227 | int i; | 227 | int i; |
228 | 228 | ||
229 | #ifdef CONFIG_CACHE_L2X0 | 229 | #ifdef CONFIG_CACHE_L2X0 |
230 | l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); | 230 | void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); |
231 | |||
232 | /* set RAM latencies to 1 cycle for this core tile. */ | ||
233 | writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
234 | writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
235 | |||
236 | l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); | ||
231 | #endif | 237 | #endif |
232 | 238 | ||
233 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); | 239 | clkdev_add_table(lookups, ARRAY_SIZE(lookups)); |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index d073b64ae87e..724ba3bce72c 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
885 | 885 | ||
886 | if (ai_usermode & UM_SIGNAL) | 886 | if (ai_usermode & UM_SIGNAL) |
887 | force_sig(SIGBUS, current); | 887 | force_sig(SIGBUS, current); |
888 | else | 888 | else { |
889 | set_cr(cr_no_alignment); | 889 | /* |
890 | * We're about to disable the alignment trap and return to | ||
891 | * user space. But if an interrupt occurs before actually | ||
892 | * reaching user space, then the IRQ vector entry code will | ||
893 | * notice that we were still in kernel space and therefore | ||
894 | * the alignment trap won't be re-enabled in that case as it | ||
895 | * is presumed to be always on from kernel space. | ||
896 | * Let's prevent that race by disabling interrupts here (they | ||
897 | * are disabled on the way back to user space anyway in | ||
898 | * entry-common.S) and disable the alignment trap only if | ||
899 | * there is no work pending for this thread. | ||
900 | */ | ||
901 | raw_local_irq_disable(); | ||
902 | if (!(current_thread_info()->flags & _TIF_WORK_MASK)) | ||
903 | set_cr(cr_no_alignment); | ||
904 | } | ||
890 | 905 | ||
891 | return 0; | 906 | return 0; |
892 | } | 907 | } |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6e1c4f6a2b3f..6a3a2d0cd6db 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/nodemask.h> | 15 | #include <linux/nodemask.h> |
16 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/sort.h> | 17 | #include <linux/sort.h> |
18 | #include <linux/fs.h> | ||
18 | 19 | ||
19 | #include <asm/cputype.h> | 20 | #include <asm/cputype.h> |
20 | #include <asm/sections.h> | 21 | #include <asm/sections.h> |
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = { | |||
246 | .domain = DOMAIN_USER, | 247 | .domain = DOMAIN_USER, |
247 | }, | 248 | }, |
248 | [MT_MEMORY] = { | 249 | [MT_MEMORY] = { |
250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | ||
251 | L_PTE_USER | L_PTE_EXEC, | ||
252 | .prot_l1 = PMD_TYPE_TABLE, | ||
249 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 253 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
250 | .domain = DOMAIN_KERNEL, | 254 | .domain = DOMAIN_KERNEL, |
251 | }, | 255 | }, |
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = { | |||
254 | .domain = DOMAIN_KERNEL, | 258 | .domain = DOMAIN_KERNEL, |
255 | }, | 259 | }, |
256 | [MT_MEMORY_NONCACHED] = { | 260 | [MT_MEMORY_NONCACHED] = { |
261 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | ||
262 | L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, | ||
263 | .prot_l1 = PMD_TYPE_TABLE, | ||
257 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 264 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
258 | .domain = DOMAIN_KERNEL, | 265 | .domain = DOMAIN_KERNEL, |
259 | }, | 266 | }, |
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void) | |||
411 | * Enable CPU-specific coherency if supported. | 418 | * Enable CPU-specific coherency if supported. |
412 | * (Only available on XSC3 at the moment.) | 419 | * (Only available on XSC3 at the moment.) |
413 | */ | 420 | */ |
414 | if (arch_is_coherent() && cpu_is_xsc3()) | 421 | if (arch_is_coherent() && cpu_is_xsc3()) { |
415 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 422 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
416 | 423 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | |
424 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | ||
425 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | ||
426 | } | ||
417 | /* | 427 | /* |
418 | * ARMv6 and above have extended page tables. | 428 | * ARMv6 and above have extended page tables. |
419 | */ | 429 | */ |
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void) | |||
438 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | 448 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; |
439 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | 449 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; |
440 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 450 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
451 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | ||
441 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | 452 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; |
453 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | ||
442 | #endif | 454 | #endif |
443 | } | 455 | } |
444 | 456 | ||
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void) | |||
475 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 487 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
476 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 488 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
477 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; | 489 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; |
490 | mem_types[MT_MEMORY].prot_pte |= kern_pgprot; | ||
491 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; | ||
478 | mem_types[MT_ROM].prot_sect |= cp->pmd; | 492 | mem_types[MT_ROM].prot_sect |= cp->pmd; |
479 | 493 | ||
480 | switch (cp->pmd) { | 494 | switch (cp->pmd) { |
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void) | |||
498 | } | 512 | } |
499 | } | 513 | } |
500 | 514 | ||
515 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
516 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
517 | unsigned long size, pgprot_t vma_prot) | ||
518 | { | ||
519 | if (!pfn_valid(pfn)) | ||
520 | return pgprot_noncached(vma_prot); | ||
521 | else if (file->f_flags & O_SYNC) | ||
522 | return pgprot_writecombine(vma_prot); | ||
523 | return vma_prot; | ||
524 | } | ||
525 | EXPORT_SYMBOL(phys_mem_access_prot); | ||
526 | #endif | ||
527 | |||
501 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) | 528 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) |
502 | 529 | ||
503 | static void __init *early_alloc(unsigned long sz) | 530 | static void __init *early_alloc(unsigned long sz) |
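Editor's note: the phys_mem_access_prot() hook added in this mmu.c hunk decides the memory type handed out for /dev/mem-style mappings: invalid pfns stay strongly uncached, while O_SYNC mappings of real RAM become write-combined. A hypothetical userspace illustration follows (map_phys is a made-up helper, not part of this patch):

        /* Hypothetical illustration: with the hook above, an O_SYNC mapping of
         * valid RAM through /dev/mem comes back write-combined rather than
         * fully uncached; non-RAM physical addresses stay uncached. */
        #include <fcntl.h>
        #include <sys/mman.h>
        #include <sys/types.h>

        static void *map_phys(off_t phys, size_t len)
        {
                int fd = open("/dev/mem", O_RDWR | O_SYNC);

                if (fd < 0)
                        return MAP_FAILED;
                /* fd is intentionally left open for the lifetime of the mapping */
                return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
        }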
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6a8506d99ee9..7563ff0141bd 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -186,13 +186,14 @@ cpu_v7_name: | |||
186 | * It is assumed that: | 186 | * It is assumed that: |
187 | * - cache type register is implemented | 187 | * - cache type register is implemented |
188 | */ | 188 | */ |
189 | __v7_setup: | 189 | __v7_ca9mp_setup: |
190 | #ifdef CONFIG_SMP | 190 | #ifdef CONFIG_SMP |
191 | mrc p15, 0, r0, c1, c0, 1 | 191 | mrc p15, 0, r0, c1, c0, 1 |
192 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? | 192 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? |
193 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and | 193 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and |
194 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting | 194 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting |
195 | #endif | 195 | #endif |
196 | __v7_setup: | ||
196 | adr r12, __v7_setup_stack @ the local stack | 197 | adr r12, __v7_setup_stack @ the local stack |
197 | stmia r12, {r0-r5, r7, r9, r11, lr} | 198 | stmia r12, {r0-r5, r7, r9, r11, lr} |
198 | bl v7_flush_dcache_all | 199 | bl v7_flush_dcache_all |
@@ -201,11 +202,16 @@ __v7_setup: | |||
201 | mrc p15, 0, r0, c0, c0, 0 @ read main ID register | 202 | mrc p15, 0, r0, c0, c0, 0 @ read main ID register |
202 | and r10, r0, #0xff000000 @ ARM? | 203 | and r10, r0, #0xff000000 @ ARM? |
203 | teq r10, #0x41000000 | 204 | teq r10, #0x41000000 |
204 | bne 2f | 205 | bne 3f |
205 | and r5, r0, #0x00f00000 @ variant | 206 | and r5, r0, #0x00f00000 @ variant |
206 | and r6, r0, #0x0000000f @ revision | 207 | and r6, r0, #0x0000000f @ revision |
207 | orr r0, r6, r5, lsr #20-4 @ combine variant and revision | 208 | orr r6, r6, r5, lsr #20-4 @ combine variant and revision |
209 | ubfx r0, r0, #4, #12 @ primary part number | ||
208 | 210 | ||
211 | /* Cortex-A8 Errata */ | ||
212 | ldr r10, =0x00000c08 @ Cortex-A8 primary part number | ||
213 | teq r0, r10 | ||
214 | bne 2f | ||
209 | #ifdef CONFIG_ARM_ERRATA_430973 | 215 | #ifdef CONFIG_ARM_ERRATA_430973 |
210 | teq r5, #0x00100000 @ only present in r1p* | 216 | teq r5, #0x00100000 @ only present in r1p* |
211 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register | 217 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register |
@@ -213,21 +219,42 @@ __v7_setup: | |||
213 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register | 219 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register |
214 | #endif | 220 | #endif |
215 | #ifdef CONFIG_ARM_ERRATA_458693 | 221 | #ifdef CONFIG_ARM_ERRATA_458693 |
216 | teq r0, #0x20 @ only present in r2p0 | 222 | teq r6, #0x20 @ only present in r2p0 |
217 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register | 223 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register |
218 | orreq r10, r10, #(1 << 5) @ set L1NEON to 1 | 224 | orreq r10, r10, #(1 << 5) @ set L1NEON to 1 |
219 | orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 | 225 | orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 |
220 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register | 226 | mcreq p15, 0, r10, c1, c0, 1 @ write aux control register |
221 | #endif | 227 | #endif |
222 | #ifdef CONFIG_ARM_ERRATA_460075 | 228 | #ifdef CONFIG_ARM_ERRATA_460075 |
223 | teq r0, #0x20 @ only present in r2p0 | 229 | teq r6, #0x20 @ only present in r2p0 |
224 | mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register | 230 | mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register |
225 | tsteq r10, #1 << 22 | 231 | tsteq r10, #1 << 22 |
226 | orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit | 232 | orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit |
227 | mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register | 233 | mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register |
228 | #endif | 234 | #endif |
235 | b 3f | ||
236 | |||
237 | /* Cortex-A9 Errata */ | ||
238 | 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number | ||
239 | teq r0, r10 | ||
240 | bne 3f | ||
241 | #ifdef CONFIG_ARM_ERRATA_742230 | ||
242 | cmp r6, #0x22 @ only present up to r2p2 | ||
243 | mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
244 | orrle r10, r10, #1 << 4 @ set bit #4 | ||
245 | mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
246 | #endif | ||
247 | #ifdef CONFIG_ARM_ERRATA_742231 | ||
248 | teq r6, #0x20 @ present in r2p0 | ||
249 | teqne r6, #0x21 @ present in r2p1 | ||
250 | teqne r6, #0x22 @ present in r2p2 | ||
251 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
252 | orreq r10, r10, #1 << 12 @ set bit #12 | ||
253 | orreq r10, r10, #1 << 22 @ set bit #22 | ||
254 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
255 | #endif | ||
229 | 256 | ||
230 | 2: mov r10, #0 | 257 | 3: mov r10, #0 |
231 | #ifdef HARVARD_CACHE | 258 | #ifdef HARVARD_CACHE |
232 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate | 259 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate |
233 | #endif | 260 | #endif |
@@ -323,6 +350,29 @@ cpu_elf_name: | |||
323 | 350 | ||
324 | .section ".proc.info.init", #alloc, #execinstr | 351 | .section ".proc.info.init", #alloc, #execinstr |
325 | 352 | ||
353 | .type __v7_ca9mp_proc_info, #object | ||
354 | __v7_ca9mp_proc_info: | ||
355 | .long 0x410fc090 @ Required ID value | ||
356 | .long 0xff0ffff0 @ Mask for ID | ||
357 | .long PMD_TYPE_SECT | \ | ||
358 | PMD_SECT_AP_WRITE | \ | ||
359 | PMD_SECT_AP_READ | \ | ||
360 | PMD_FLAGS | ||
361 | .long PMD_TYPE_SECT | \ | ||
362 | PMD_SECT_XN | \ | ||
363 | PMD_SECT_AP_WRITE | \ | ||
364 | PMD_SECT_AP_READ | ||
365 | b __v7_ca9mp_setup | ||
366 | .long cpu_arch_name | ||
367 | .long cpu_elf_name | ||
368 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
369 | .long cpu_v7_name | ||
370 | .long v7_processor_functions | ||
371 | .long v7wbi_tlb_fns | ||
372 | .long v6_user_fns | ||
373 | .long v7_cache_fns | ||
374 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | ||
375 | |||
326 | /* | 376 | /* |
327 | * Match any ARMv7 processor core. | 377 | * Match any ARMv7 processor core. |
328 | */ | 378 | */ |
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c index ea3ca86c5283..aedf9c1d645e 100644 --- a/arch/arm/plat-nomadik/timer.c +++ b/arch/arm/plat-nomadik/timer.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/arm/mach-nomadik/timer.c | 2 | * linux/arch/arm/plat-nomadik/timer.c |
3 | * | 3 | * |
4 | * Copyright (C) 2008 STMicroelectronics | 4 | * Copyright (C) 2008 STMicroelectronics |
5 | * Copyright (C) 2010 Alessandro Rubini | 5 | * Copyright (C) 2010 Alessandro Rubini |
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode, | |||
75 | cr = readl(mtu_base + MTU_CR(1)); | 75 | cr = readl(mtu_base + MTU_CR(1)); |
76 | writel(0, mtu_base + MTU_LR(1)); | 76 | writel(0, mtu_base + MTU_LR(1)); |
77 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); | 77 | writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); |
78 | writel(0x2, mtu_base + MTU_IMSC); | 78 | writel(1 << 1, mtu_base + MTU_IMSC); |
79 | break; | 79 | break; |
80 | case CLOCK_EVT_MODE_SHUTDOWN: | 80 | case CLOCK_EVT_MODE_SHUTDOWN: |
81 | case CLOCK_EVT_MODE_UNUSED: | 81 | case CLOCK_EVT_MODE_UNUSED: |
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void) | |||
131 | { | 131 | { |
132 | unsigned long rate; | 132 | unsigned long rate; |
133 | struct clk *clk0; | 133 | struct clk *clk0; |
134 | struct clk *clk1; | 134 | u32 cr = MTU_CRn_32BITS; |
135 | u32 cr; | ||
136 | 135 | ||
137 | clk0 = clk_get_sys("mtu0", NULL); | 136 | clk0 = clk_get_sys("mtu0", NULL); |
138 | BUG_ON(IS_ERR(clk0)); | 137 | BUG_ON(IS_ERR(clk0)); |
139 | 138 | ||
140 | clk1 = clk_get_sys("mtu1", NULL); | ||
141 | BUG_ON(IS_ERR(clk1)); | ||
142 | |||
143 | clk_enable(clk0); | 139 | clk_enable(clk0); |
144 | clk_enable(clk1); | ||
145 | 140 | ||
146 | /* | 141 | /* |
147 | * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: | 142 | * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz |
148 | * use a divide-by-16 counter if it's more than 16MHz | 143 | * for ux500. |
144 | * Use a divide-by-16 counter if the tick rate is more than 32MHz. | ||
145 | * At 32 MHz, the timer (with 32 bit counter) can be programmed | ||
146 | * to wake up at most 127 s ahead in time. Dividing a 2.4 MHz timer | ||
147 | * with 16 gives too low timer resolution. | ||
149 | */ | 148 | */ |
150 | cr = MTU_CRn_32BITS;; | ||
151 | rate = clk_get_rate(clk0); | 149 | rate = clk_get_rate(clk0); |
152 | if (rate > 16 << 20) { | 150 | if (rate > 32000000) { |
153 | rate /= 16; | 151 | rate /= 16; |
154 | cr |= MTU_CRn_PRESCALE_16; | 152 | cr |= MTU_CRn_PRESCALE_16; |
155 | } else { | 153 | } else { |
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void) | |||
170 | pr_err("timer: failed to initialize clock source %s\n", | 168 | pr_err("timer: failed to initialize clock source %s\n", |
171 | nmdk_clksrc.name); | 169 | nmdk_clksrc.name); |
172 | 170 | ||
173 | /* Timer 1 is used for events, fix according to rate */ | 171 | /* Timer 1 is used for events */ |
174 | cr = MTU_CRn_32BITS; | 172 | |
175 | rate = clk_get_rate(clk1); | ||
176 | if (rate > 16 << 20) { | ||
177 | rate /= 16; | ||
178 | cr |= MTU_CRn_PRESCALE_16; | ||
179 | } else { | ||
180 | cr |= MTU_CRn_PRESCALE_1; | ||
181 | } | ||
182 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); | 173 | clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); |
183 | 174 | ||
184 | writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ | 175 | writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ |
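Editor's note: the reworked comment in nmdk_timer_init() is really an arithmetic argument: a 32-bit down-counter running at rate R wraps after 2^32 / R seconds, so a 32 MHz clock still gives roughly two minutes of range, whereas prescaling a 2.4 MHz clock by 16 would leave only about 150 kHz of resolution. A small standalone sketch of that trade-off (not kernel code; the rate value is illustrative):

        /* Standalone sketch: apply the same ">32 MHz => divide by 16" rule as
         * the patch and print the resulting tick resolution and wrap horizon. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long rate = 2400000;   /* e.g. 2.4 MHz on Nomadik */
                unsigned int prescale = 1;

                if (rate > 32000000) {          /* divide by 16 only above 32 MHz */
                        rate /= 16;
                        prescale = 16;
                }

                printf("prescale /%u: tick %.3f us, 32-bit counter wraps after %.1f s\n",
                       prescale, 1e6 / rate, 4294967296.0 / rate);
                return 0;
        }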
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index e39a417a368d..a92cb499313f 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig | |||
@@ -33,7 +33,7 @@ config OMAP_DEBUG_DEVICES | |||
33 | config OMAP_DEBUG_LEDS | 33 | config OMAP_DEBUG_LEDS |
34 | bool | 34 | bool |
35 | depends on OMAP_DEBUG_DEVICES | 35 | depends on OMAP_DEBUG_DEVICES |
36 | default y if LEDS | 36 | default y if LEDS_CLASS |
37 | 37 | ||
38 | config OMAP_RESET_CLOCKS | 38 | config OMAP_RESET_CLOCKS |
39 | bool "Reset unused clocks during boot" | 39 | bool "Reset unused clocks during boot" |
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c index e31496e35b0f..0c8612fd8312 100644 --- a/arch/arm/plat-omap/mcbsp.c +++ b/arch/arm/plat-omap/mcbsp.c | |||
@@ -156,7 +156,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id) | |||
156 | /* Writing zero to RSYNC_ERR clears the IRQ */ | 156 | /* Writing zero to RSYNC_ERR clears the IRQ */ |
157 | MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); | 157 | MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); |
158 | } else { | 158 | } else { |
159 | complete(&mcbsp_rx->tx_irq_completion); | 159 | complete(&mcbsp_rx->rx_irq_completion); |
160 | } | 160 | } |
161 | 161 | ||
162 | return IRQ_HANDLED; | 162 | return IRQ_HANDLED; |
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 226b2e858d6c..10b3b4c63372 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
@@ -220,20 +220,7 @@ void __init omap_map_sram(void) | |||
220 | if (omap_sram_size == 0) | 220 | if (omap_sram_size == 0) |
221 | return; | 221 | return; |
222 | 222 | ||
223 | if (cpu_is_omap24xx()) { | ||
224 | omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; | ||
225 | |||
226 | base = OMAP2_SRAM_PA; | ||
227 | base = ROUND_DOWN(base, PAGE_SIZE); | ||
228 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); | ||
229 | } | ||
230 | |||
231 | if (cpu_is_omap34xx()) { | 223 | if (cpu_is_omap34xx()) { |
232 | omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA; | ||
233 | base = OMAP3_SRAM_PA; | ||
234 | base = ROUND_DOWN(base, PAGE_SIZE); | ||
235 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); | ||
236 | |||
237 | /* | 224 | /* |
238 | * SRAM must be marked as non-cached on OMAP3 since the | 225 | * SRAM must be marked as non-cached on OMAP3 since the |
239 | * CORE DPLL M2 divider change code (in SRAM) runs with the | 226 | * CORE DPLL M2 divider change code (in SRAM) runs with the |
@@ -244,13 +231,11 @@ void __init omap_map_sram(void) | |||
244 | omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; | 231 | omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; |
245 | } | 232 | } |
246 | 233 | ||
247 | if (cpu_is_omap44xx()) { | 234 | omap_sram_io_desc[0].virtual = omap_sram_base; |
248 | omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; | 235 | base = omap_sram_start; |
249 | base = OMAP4_SRAM_PA; | 236 | base = ROUND_DOWN(base, PAGE_SIZE); |
250 | base = ROUND_DOWN(base, PAGE_SIZE); | 237 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); |
251 | omap_sram_io_desc[0].pfn = __phys_to_pfn(base); | 238 | omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); |
252 | } | ||
253 | omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ | ||
254 | iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); | 239 | iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); |
255 | 240 | ||
256 | printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", | 241 | printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", |
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index 98f94d041d9c..a727f54d64d6 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c | |||
@@ -314,10 +314,9 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
314 | vfree(module->arch.syminfo); | 314 | vfree(module->arch.syminfo); |
315 | module->arch.syminfo = NULL; | 315 | module->arch.syminfo = NULL; |
316 | 316 | ||
317 | return module_bug_finalize(hdr, sechdrs, module); | 317 | return 0; |
318 | } | 318 | } |
319 | 319 | ||
320 | void module_arch_cleanup(struct module *module) | 320 | void module_arch_cleanup(struct module *module) |
321 | { | 321 | { |
322 | module_bug_cleanup(module); | ||
323 | } | 322 | } |
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c index 0865e291c20d..db4953dc4e1b 100644 --- a/arch/h8300/kernel/module.c +++ b/arch/h8300/kernel/module.c | |||
@@ -112,10 +112,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
112 | const Elf_Shdr *sechdrs, | 112 | const Elf_Shdr *sechdrs, |
113 | struct module *me) | 113 | struct module *me) |
114 | { | 114 | { |
115 | return module_bug_finalize(hdr, sechdrs, me); | 115 | return 0; |
116 | } | 116 | } |
117 | 117 | ||
118 | void module_arch_cleanup(struct module *mod) | 118 | void module_arch_cleanup(struct module *mod) |
119 | { | 119 | { |
120 | module_bug_cleanup(mod); | ||
121 | } | 120 | } |
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h index 9c1acb2b1a92..b2eeb0de1c8d 100644 --- a/arch/m32r/include/asm/signal.h +++ b/arch/m32r/include/asm/signal.h | |||
@@ -157,7 +157,6 @@ typedef struct sigaltstack { | |||
157 | #undef __HAVE_ARCH_SIG_BITOPS | 157 | #undef __HAVE_ARCH_SIG_BITOPS |
158 | 158 | ||
159 | struct pt_regs; | 159 | struct pt_regs; |
160 | extern int do_signal(struct pt_regs *regs, sigset_t *oldset); | ||
161 | 160 | ||
162 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | 161 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) |
163 | 162 | ||
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h index 76125777483c..c70545689da8 100644 --- a/arch/m32r/include/asm/unistd.h +++ b/arch/m32r/include/asm/unistd.h | |||
@@ -351,6 +351,7 @@ | |||
351 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ | 351 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ |
352 | #define __ARCH_WANT_SYS_OLDUMOUNT | 352 | #define __ARCH_WANT_SYS_OLDUMOUNT |
353 | #define __ARCH_WANT_SYS_RT_SIGACTION | 353 | #define __ARCH_WANT_SYS_RT_SIGACTION |
354 | #define __ARCH_WANT_SYS_RT_SIGSUSPEND | ||
354 | 355 | ||
355 | #define __IGNORE_lchown | 356 | #define __IGNORE_lchown |
356 | #define __IGNORE_setuid | 357 | #define __IGNORE_setuid |
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index 403869833b98..225412bc227e 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S | |||
@@ -235,10 +235,9 @@ work_resched: | |||
235 | work_notifysig: ; deal with pending signals and | 235 | work_notifysig: ; deal with pending signals and |
236 | ; notify-resume requests | 236 | ; notify-resume requests |
237 | mv r0, sp ; arg1 : struct pt_regs *regs | 237 | mv r0, sp ; arg1 : struct pt_regs *regs |
238 | ldi r1, #0 ; arg2 : sigset_t *oldset | 238 | mv r1, r9 ; arg2 : __u32 thread_info_flags |
239 | mv r2, r9 ; arg3 : __u32 thread_info_flags | ||
240 | bl do_notify_resume | 239 | bl do_notify_resume |
241 | bra restore_all | 240 | bra resume_userspace |
242 | 241 | ||
243 | ; perform syscall exit tracing | 242 | ; perform syscall exit tracing |
244 | ALIGN | 243 | ALIGN |
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index e555091eb97c..0021ade4cba8 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c | |||
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child) | |||
592 | 592 | ||
593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) | 593 | if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) |
594 | != sizeof(insn)) | 594 | != sizeof(insn)) |
595 | break; | 595 | return -EIO; |
596 | 596 | ||
597 | compute_next_pc(insn, pc, &next_pc, child); | 597 | compute_next_pc(insn, pc, &next_pc, child); |
598 | if (next_pc & 0x80000000) | 598 | if (next_pc & 0x80000000) |
599 | break; | 599 | return -EIO; |
600 | 600 | ||
601 | if (embed_debug_trap(child, next_pc)) | 601 | if (embed_debug_trap(child, next_pc)) |
602 | break; | 602 | return -EIO; |
603 | 603 | ||
604 | invalidate_cache(); | 604 | invalidate_cache(); |
605 | return 0; | ||
605 | } | 606 | } |
606 | 607 | ||
607 | void user_disable_single_step(struct task_struct *child) | 608 | void user_disable_single_step(struct task_struct *child) |
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 144b0f124fc7..7bbe38645ed5 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c | |||
@@ -28,37 +28,6 @@ | |||
28 | 28 | ||
29 | #define DEBUG_SIG 0 | 29 | #define DEBUG_SIG 0 |
30 | 30 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
32 | |||
33 | int do_signal(struct pt_regs *, sigset_t *); | ||
34 | |||
35 | asmlinkage int | ||
36 | sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, | ||
37 | unsigned long r2, unsigned long r3, unsigned long r4, | ||
38 | unsigned long r5, unsigned long r6, struct pt_regs *regs) | ||
39 | { | ||
40 | sigset_t newset; | ||
41 | |||
42 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
43 | if (sigsetsize != sizeof(sigset_t)) | ||
44 | return -EINVAL; | ||
45 | |||
46 | if (copy_from_user(&newset, unewset, sizeof(newset))) | ||
47 | return -EFAULT; | ||
48 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); | ||
49 | |||
50 | spin_lock_irq(¤t->sighand->siglock); | ||
51 | current->saved_sigmask = current->blocked; | ||
52 | current->blocked = newset; | ||
53 | recalc_sigpending(); | ||
54 | spin_unlock_irq(¤t->sighand->siglock); | ||
55 | |||
56 | current->state = TASK_INTERRUPTIBLE; | ||
57 | schedule(); | ||
58 | set_thread_flag(TIF_RESTORE_SIGMASK); | ||
59 | return -ERESTARTNOHAND; | ||
60 | } | ||
61 | |||
62 | asmlinkage int | 31 | asmlinkage int |
63 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 32 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
64 | unsigned long r2, unsigned long r3, unsigned long r4, | 33 | unsigned long r2, unsigned long r3, unsigned long r4, |
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | |||
218 | return (void __user *)((sp - frame_size) & -8ul); | 187 | return (void __user *)((sp - frame_size) & -8ul); |
219 | } | 188 | } |
220 | 189 | ||
221 | static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 190 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
222 | sigset_t *set, struct pt_regs *regs) | 191 | sigset_t *set, struct pt_regs *regs) |
223 | { | 192 | { |
224 | struct rt_sigframe __user *frame; | 193 | struct rt_sigframe __user *frame; |
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
275 | current->comm, current->pid, frame, regs->pc); | 244 | current->comm, current->pid, frame, regs->pc); |
276 | #endif | 245 | #endif |
277 | 246 | ||
278 | return; | 247 | return 0; |
279 | 248 | ||
280 | give_sigsegv: | 249 | give_sigsegv: |
281 | force_sigsegv(sig, current); | 250 | force_sigsegv(sig, current); |
251 | return -EFAULT; | ||
252 | } | ||
253 | |||
254 | static int prev_insn(struct pt_regs *regs) | ||
255 | { | ||
256 | u16 inst; | ||
257 | if (get_user(&inst, (u16 __user *)(regs->bpc - 2))) | ||
258 | return -EFAULT; | ||
259 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | ||
260 | regs->bpc -= 2; | ||
261 | else | ||
262 | regs->bpc -= 4; | ||
263 | regs->syscall_nr = -1; | ||
264 | return 0; | ||
282 | } | 265 | } |
283 | 266 | ||
284 | /* | 267 | /* |
285 | * OK, we're invoking a handler | 268 | * OK, we're invoking a handler |
286 | */ | 269 | */ |
287 | 270 | ||
288 | static void | 271 | static int |
289 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 272 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, |
290 | sigset_t *oldset, struct pt_regs *regs) | 273 | sigset_t *oldset, struct pt_regs *regs) |
291 | { | 274 | { |
292 | unsigned short inst; | ||
293 | |||
294 | /* Are we from a system call? */ | 275 | /* Are we from a system call? */ |
295 | if (regs->syscall_nr >= 0) { | 276 | if (regs->syscall_nr >= 0) { |
296 | /* If so, check system call restarting.. */ | 277 | /* If so, check system call restarting.. */ |
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
308 | /* fallthrough */ | 289 | /* fallthrough */ |
309 | case -ERESTARTNOINTR: | 290 | case -ERESTARTNOINTR: |
310 | regs->r0 = regs->orig_r0; | 291 | regs->r0 = regs->orig_r0; |
311 | inst = *(unsigned short *)(regs->bpc - 2); | 292 | if (prev_insn(regs) < 0) |
312 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 293 | return -EFAULT; |
313 | regs->bpc -= 2; | ||
314 | else | ||
315 | regs->bpc -= 4; | ||
316 | } | 294 | } |
317 | } | 295 | } |
318 | 296 | ||
319 | /* Set up the stack frame */ | 297 | /* Set up the stack frame */ |
320 | setup_rt_frame(sig, ka, info, oldset, regs); | 298 | if (setup_rt_frame(sig, ka, info, oldset, regs)) |
299 | return -EFAULT; | ||
321 | 300 | ||
322 | spin_lock_irq(¤t->sighand->siglock); | 301 | spin_lock_irq(¤t->sighand->siglock); |
323 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 302 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
325 | sigaddset(¤t->blocked,sig); | 304 | sigaddset(¤t->blocked,sig); |
326 | recalc_sigpending(); | 305 | recalc_sigpending(); |
327 | spin_unlock_irq(¤t->sighand->siglock); | 306 | spin_unlock_irq(¤t->sighand->siglock); |
307 | return 0; | ||
328 | } | 308 | } |
329 | 309 | ||
330 | /* | 310 | /* |
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | |||
332 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | 312 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
333 | * mistake. | 313 | * mistake. |
334 | */ | 314 | */ |
335 | int do_signal(struct pt_regs *regs, sigset_t *oldset) | 315 | static void do_signal(struct pt_regs *regs) |
336 | { | 316 | { |
337 | siginfo_t info; | 317 | siginfo_t info; |
338 | int signr; | 318 | int signr; |
339 | struct k_sigaction ka; | 319 | struct k_sigaction ka; |
340 | unsigned short inst; | 320 | sigset_t *oldset; |
341 | 321 | ||
342 | /* | 322 | /* |
343 | * We want the common case to go fast, which | 323 | * We want the common case to go fast, which |
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
346 | * if so. | 326 | * if so. |
347 | */ | 327 | */ |
348 | if (!user_mode(regs)) | 328 | if (!user_mode(regs)) |
349 | return 1; | 329 | return; |
350 | 330 | ||
351 | if (try_to_freeze()) | 331 | if (try_to_freeze()) |
352 | goto no_signal; | 332 | goto no_signal; |
353 | 333 | ||
354 | if (!oldset) | 334 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
335 | oldset = ¤t->saved_sigmask; | ||
336 | else | ||
355 | oldset = ¤t->blocked; | 337 | oldset = ¤t->blocked; |
356 | 338 | ||
357 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 339 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
363 | */ | 345 | */ |
364 | 346 | ||
365 | /* Whee! Actually deliver the signal. */ | 347 | /* Whee! Actually deliver the signal. */ |
366 | handle_signal(signr, &ka, &info, oldset, regs); | 348 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) |
367 | return 1; | 349 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
350 | |||
351 | return; | ||
368 | } | 352 | } |
369 | 353 | ||
370 | no_signal: | 354 | no_signal: |
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
375 | regs->r0 == -ERESTARTSYS || | 359 | regs->r0 == -ERESTARTSYS || |
376 | regs->r0 == -ERESTARTNOINTR) { | 360 | regs->r0 == -ERESTARTNOINTR) { |
377 | regs->r0 = regs->orig_r0; | 361 | regs->r0 = regs->orig_r0; |
378 | inst = *(unsigned short *)(regs->bpc - 2); | 362 | prev_insn(regs); |
379 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | 363 | } else if (regs->r0 == -ERESTART_RESTARTBLOCK){ |
380 | regs->bpc -= 2; | ||
381 | else | ||
382 | regs->bpc -= 4; | ||
383 | } | ||
384 | if (regs->r0 == -ERESTART_RESTARTBLOCK){ | ||
385 | regs->r0 = regs->orig_r0; | 364 | regs->r0 = regs->orig_r0; |
386 | regs->r7 = __NR_restart_syscall; | 365 | regs->r7 = __NR_restart_syscall; |
387 | inst = *(unsigned short *)(regs->bpc - 2); | 366 | prev_insn(regs); |
388 | if ((inst & 0xfff0) == 0x10f0) /* trap ? */ | ||
389 | regs->bpc -= 2; | ||
390 | else | ||
391 | regs->bpc -= 4; | ||
392 | } | 367 | } |
393 | } | 368 | } |
394 | return 0; | 369 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
370 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
371 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
372 | } | ||
395 | } | 373 | } |
396 | 374 | ||
397 | /* | 375 | /* |
398 | * notification of userspace execution resumption | 376 | * notification of userspace execution resumption |
399 | * - triggered by current->work.notify_resume | 377 | * - triggered by current->work.notify_resume |
400 | */ | 378 | */ |
401 | void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | 379 | void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags) |
402 | __u32 thread_info_flags) | ||
403 | { | 380 | { |
404 | /* Pending single-step? */ | 381 | /* Pending single-step? */ |
405 | if (thread_info_flags & _TIF_SINGLESTEP) | 382 | if (thread_info_flags & _TIF_SINGLESTEP) |
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, | |||
407 | 384 | ||
408 | /* deal with pending signal delivery */ | 385 | /* deal with pending signal delivery */ |
409 | if (thread_info_flags & _TIF_SIGPENDING) | 386 | if (thread_info_flags & _TIF_SIGPENDING) |
410 | do_signal(regs,oldset); | 387 | do_signal(regs); |
411 | 388 | ||
412 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | 389 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { |
413 | clear_thread_flag(TIF_NOTIFY_RESUME); | 390 | clear_thread_flag(TIF_NOTIFY_RESUME); |
diff --git a/arch/m68k/mac/macboing.c b/arch/m68k/mac/macboing.c index 8f0640847ad2..05285d08e547 100644 --- a/arch/m68k/mac/macboing.c +++ b/arch/m68k/mac/macboing.c | |||
@@ -162,7 +162,7 @@ static void mac_init_asc( void ) | |||
162 | void mac_mksound( unsigned int freq, unsigned int length ) | 162 | void mac_mksound( unsigned int freq, unsigned int length ) |
163 | { | 163 | { |
164 | __u32 cfreq = ( freq << 5 ) / 468; | 164 | __u32 cfreq = ( freq << 5 ) / 468; |
165 | __u32 flags; | 165 | unsigned long flags; |
166 | int i; | 166 | int i; |
167 | 167 | ||
168 | if ( mac_special_bell == NULL ) | 168 | if ( mac_special_bell == NULL ) |
@@ -224,7 +224,7 @@ static void mac_nosound( unsigned long ignored ) | |||
224 | */ | 224 | */ |
225 | static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) | 225 | static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsigned int volume ) |
226 | { | 226 | { |
227 | __u32 flags; | 227 | unsigned long flags; |
228 | 228 | ||
229 | /* if the bell is already ringing, ring longer */ | 229 | /* if the bell is already ringing, ring longer */ |
230 | if ( mac_bell_duration > 0 ) | 230 | if ( mac_bell_duration > 0 ) |
@@ -271,7 +271,7 @@ static void mac_quadra_start_bell( unsigned int freq, unsigned int length, unsig | |||
271 | static void mac_quadra_ring_bell( unsigned long ignored ) | 271 | static void mac_quadra_ring_bell( unsigned long ignored ) |
272 | { | 272 | { |
273 | int i, count = mac_asc_samplespersec / HZ; | 273 | int i, count = mac_asc_samplespersec / HZ; |
274 | __u32 flags; | 274 | unsigned long flags; |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * we neither want a sound buffer overflow nor underflow, so we need to match | 277 | * we neither want a sound buffer overflow nor underflow, so we need to match |
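Editor's note: the macboing.c hunks only change the type used for the saved interrupt flags: local_irq_save()/local_irq_restore() expect an unsigned long lvalue, so storing the flags in a __u32 only works by coincidence of type widths. A minimal illustration of the intended pattern (bell_critical_section is a made-up example, not code from the patch):

        /* Illustrative only: interrupt flags saved and restored around a
         * critical section are declared as unsigned long, matching what the
         * irq-save helpers expand to. */
        #include <linux/irqflags.h>

        static void bell_critical_section(void)
        {
                unsigned long flags;    /* was __u32 in the old macboing.c code */

                local_irq_save(flags);
                /* ... program the ASC sound chip ... */
                local_irq_restore(flags);
        }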
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3ad59dde4852..5526faabfc21 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -13,6 +13,7 @@ config MIPS | |||
13 | select HAVE_KPROBES | 13 | select HAVE_KPROBES |
14 | select HAVE_KRETPROBES | 14 | select HAVE_KRETPROBES |
15 | select RTC_LIB if !MACH_LOONGSON | 15 | select RTC_LIB if !MACH_LOONGSON |
16 | select GENERIC_ATOMIC64 if !64BIT | ||
16 | 17 | ||
17 | mainmenu "Linux/MIPS Kernel Configuration" | 18 | mainmenu "Linux/MIPS Kernel Configuration" |
18 | 19 | ||
@@ -1646,8 +1647,16 @@ config MIPS_MT_SMP | |||
1646 | select SYS_SUPPORTS_SMP | 1647 | select SYS_SUPPORTS_SMP |
1647 | select SMP_UP | 1648 | select SMP_UP |
1648 | help | 1649 | help |
1649 | This is a kernel model which is also known a VSMP or lately | 1650 | This is a kernel model which is known a VSMP but lately has been |
1650 | has been marketesed into SMVP. | 1651 | marketesed into SMVP. |
1652 | Virtual SMP uses the processor's VPEs to implement virtual | ||
1653 | processors. In the currently available configuration of the 34K processor | ||
1654 | this allows for a dual processor. Both processors will share the same | ||
1655 | primary caches; each will obtain half of the TLB for its own | ||
1656 | exclusive use. For a layman this model can be described as similar to | ||
1657 | what Intel calls Hyperthreading. | ||
1658 | |||
1659 | For further information see http://www.linux-mips.org/wiki/34K#VSMP | ||
1651 | 1660 | ||
1652 | config MIPS_MT_SMTC | 1661 | config MIPS_MT_SMTC |
1653 | bool "SMTC: Use all TCs on all VPEs for SMP" | 1662 | bool "SMTC: Use all TCs on all VPEs for SMP" |
@@ -1664,6 +1673,14 @@ config MIPS_MT_SMTC | |||
1664 | help | 1673 | help |
1665 | This is a kernel model which is known a SMTC or lately has been | 1674 | This is a kernel model which is known a SMTC or lately has been |
1666 | marketesed into SMVP. | 1675 | marketesed into SMVP. |
1676 | It presents the available TCs of the core as processors to Linux. | ||
1677 | On currently available 34K processors this means a Linux system will | ||
1678 | see up to 5 processors. The implementation of the SMTC kernel differs | ||
1679 | significantly from VSMP and cannot efficiently coexist in the same | ||
1680 | kernel binary so the choice between VSMP and SMTC is a compile time | ||
1681 | decision. | ||
1682 | |||
1683 | For further information see http://www.linux-mips.org/wiki/34K#SMTC | ||
1667 | 1684 | ||
1668 | endchoice | 1685 | endchoice |
1669 | 1686 | ||
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c index c29511b11d44..534021059629 100644 --- a/arch/mips/alchemy/common/prom.c +++ b/arch/mips/alchemy/common/prom.c | |||
@@ -43,7 +43,7 @@ int prom_argc; | |||
43 | char **prom_argv; | 43 | char **prom_argv; |
44 | char **prom_envp; | 44 | char **prom_envp; |
45 | 45 | ||
46 | void prom_init_cmdline(void) | 46 | void __init prom_init_cmdline(void) |
47 | { | 47 | { |
48 | int i; | 48 | int i; |
49 | 49 | ||
@@ -104,7 +104,7 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str) | |||
104 | } | 104 | } |
105 | } | 105 | } |
106 | 106 | ||
107 | int prom_get_ethernet_addr(char *ethernet_addr) | 107 | int __init prom_get_ethernet_addr(char *ethernet_addr) |
108 | { | 108 | { |
109 | char *ethaddr_str; | 109 | char *ethaddr_str; |
110 | 110 | ||
@@ -123,7 +123,6 @@ int prom_get_ethernet_addr(char *ethernet_addr) | |||
123 | 123 | ||
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | EXPORT_SYMBOL(prom_get_ethernet_addr); | ||
127 | 126 | ||
128 | void __init prom_free_prom_memory(void) | 127 | void __init prom_free_prom_memory(void) |
129 | { | 128 | { |
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index ed9bb709c9a3..5fd7f7a58b7e 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile | |||
@@ -59,7 +59,7 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE | |||
59 | hostprogs-y := calc_vmlinuz_load_addr | 59 | hostprogs-y := calc_vmlinuz_load_addr |
60 | 60 | ||
61 | VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ | 61 | VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ |
62 | $(objtree)/$(KBUILD_IMAGE) $(VMLINUX_LOAD_ADDRESS)) | 62 | $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) |
63 | 63 | ||
64 | vmlinuzobjs-y += $(obj)/piggy.o | 64 | vmlinuzobjs-y += $(obj)/piggy.o |
65 | 65 | ||
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 094c17e38e16..47323ca452dc 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig | |||
@@ -83,3 +83,7 @@ config ARCH_SPARSEMEM_ENABLE | |||
83 | def_bool y | 83 | def_bool y |
84 | select SPARSEMEM_STATIC | 84 | select SPARSEMEM_STATIC |
85 | depends on CPU_CAVIUM_OCTEON | 85 | depends on CPU_CAVIUM_OCTEON |
86 | |||
87 | config CAVIUM_OCTEON_HELPER | ||
88 | def_bool y | ||
89 | depends on OCTEON_ETHERNET || PCI | ||
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c index c664c8cc2b42..a5b427909b5c 100644 --- a/arch/mips/cavium-octeon/cpu.c +++ b/arch/mips/cavium-octeon/cpu.c | |||
@@ -41,7 +41,7 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
41 | return NOTIFY_OK; /* Let default notifier send signals */ | 41 | return NOTIFY_OK; /* Let default notifier send signals */ |
42 | } | 42 | } |
43 | 43 | ||
44 | static int cnmips_cu2_setup(void) | 44 | static int __init cnmips_cu2_setup(void) |
45 | { | 45 | { |
46 | return cu2_notifier(cnmips_cu2_call, 0); | 46 | return cu2_notifier(cnmips_cu2_call, 0); |
47 | } | 47 | } |
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index 2fd66db6939e..7f41c5be2190 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile | |||
@@ -11,4 +11,4 @@ | |||
11 | 11 | ||
12 | obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o | 12 | obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o |
13 | 13 | ||
14 | obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o | 14 | obj-$(CONFIG_CAVIUM_OCTEON_HELPER) += cvmx-helper-errata.o cvmx-helper-jtag.o |
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index c63c56bfd184..47d87da379f9 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -782,6 +782,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | |||
782 | */ | 782 | */ |
783 | #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) | 783 | #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) |
784 | 784 | ||
785 | #else /* !CONFIG_64BIT */ | ||
786 | |||
787 | #include <asm-generic/atomic64.h> | ||
788 | |||
785 | #endif /* CONFIG_64BIT */ | 789 | #endif /* CONFIG_64BIT */ |
786 | 790 | ||
787 | /* | 791 | /* |
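Editor's note: pulling in <asm-generic/atomic64.h> when CONFIG_64BIT is not set (paired with the new GENERIC_ATOMIC64 select in arch/mips/Kconfig above) gives 32-bit MIPS kernels a spinlock-backed atomic64_t. A hedged illustration of the interface this makes available (example_counter and bump_counter are illustrative names only):

        /* Hedged illustration: basic 64-bit atomic operations that now build
         * on 32-bit MIPS via the generic, spinlock-backed implementation. */
        #include <asm/atomic.h>

        static atomic64_t example_counter = ATOMIC64_INIT(0);

        static long long bump_counter(void)
        {
                atomic64_add(1, &example_counter);
                return atomic64_read(&example_counter);
        }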
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index 2cb2f0c2c4f8..3532e2c5f098 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h | |||
@@ -24,7 +24,7 @@ extern int cu2_notifier_call_chain(unsigned long val, void *v); | |||
24 | 24 | ||
25 | #define cu2_notifier(fn, pri) \ | 25 | #define cu2_notifier(fn, pri) \ |
26 | ({ \ | 26 | ({ \ |
27 | static struct notifier_block fn##_nb __cpuinitdata = { \ | 27 | static struct notifier_block fn##_nb = { \ |
28 | .notifier_call = fn, \ | 28 | .notifier_call = fn, \ |
29 | .priority = pri \ | 29 | .priority = pri \ |
30 | }; \ | 30 | }; \ |
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h index 9b9436a4d816..86548da650e7 100644 --- a/arch/mips/include/asm/gic.h +++ b/arch/mips/include/asm/gic.h | |||
@@ -321,6 +321,7 @@ struct gic_intrmask_regs { | |||
321 | */ | 321 | */ |
322 | struct gic_intr_map { | 322 | struct gic_intr_map { |
323 | unsigned int cpunum; /* Directed to this CPU */ | 323 | unsigned int cpunum; /* Directed to this CPU */ |
324 | #define GIC_UNUSED 0xdead /* Dummy data */ | ||
324 | unsigned int pin; /* Directed to this Pin */ | 325 | unsigned int pin; /* Directed to this Pin */ |
325 | unsigned int polarity; /* Polarity : +/- */ | 326 | unsigned int polarity; /* Polarity : +/- */ |
326 | unsigned int trigtype; /* Trigger : Edge/Levl */ | 327 | unsigned int trigtype; /* Trigger : Edge/Levl */ |
diff --git a/arch/mips/include/asm/mach-tx49xx/kmalloc.h b/arch/mips/include/asm/mach-tx49xx/kmalloc.h index b74caf65482b..ff9a8b86cb93 100644 --- a/arch/mips/include/asm/mach-tx49xx/kmalloc.h +++ b/arch/mips/include/asm/mach-tx49xx/kmalloc.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #ifndef __ASM_MACH_TX49XX_KMALLOC_H | 1 | #ifndef __ASM_MACH_TX49XX_KMALLOC_H |
2 | #define __ASM_MACH_TX49XX_KMALLOC_H | 2 | #define __ASM_MACH_TX49XX_KMALLOC_H |
3 | 3 | ||
4 | #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES | 4 | #define ARCH_DMA_MINALIGN L1_CACHE_BYTES |
5 | 5 | ||
6 | #endif /* __ASM_MACH_TX49XX_KMALLOC_H */ | 6 | #endif /* __ASM_MACH_TX49XX_KMALLOC_H */ |
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index cea872fc6f5c..d11aa02a956a 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h | |||
@@ -88,9 +88,6 @@ | |||
88 | 88 | ||
89 | #define GIC_EXT_INTR(x) x | 89 | #define GIC_EXT_INTR(x) x |
90 | 90 | ||
91 | /* Dummy data */ | ||
92 | #define X 0xdead | ||
93 | |||
94 | /* External Interrupts used for IPI */ | 91 | /* External Interrupts used for IPI */ |
95 | #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 | 92 | #define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 |
96 | #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 | 93 | #define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 |
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index a16beafcea91..e59cd1ac09c2 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
@@ -150,6 +150,20 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) | 150 | ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) |
151 | #endif | 151 | #endif |
152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) | 152 | #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) |
153 | |||
154 | /* | ||
155 | * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad | ||
156 | * (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The | ||
157 | * discussion can be found in lkml posting | ||
158 | * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is | ||
159 | * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html | ||
160 | * | ||
161 | * It is unclear if the misscompilations mentioned in | ||
162 | * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one | ||
163 | * until GCC 3.x has been retired before we can apply | ||
164 | * https://patchwork.linux-mips.org/patch/1541/ | ||
165 | */ | ||
166 | |||
153 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) | 167 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) |
154 | 168 | ||
155 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 169 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 2376f2e06e47..70df9c0d3c5b 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -146,7 +146,8 @@ register struct thread_info *__current_thread_info __asm__("$28"); | |||
146 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) | 146 | #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) |
147 | 147 | ||
148 | /* work to do on interrupt/exception return */ | 148 | /* work to do on interrupt/exception return */ |
149 | #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP) | 149 | #define _TIF_WORK_MASK (0x0000ffef & \ |
150 | ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT)) | ||
150 | /* work to do on any return to u-space */ | 151 | /* work to do on any return to u-space */ |
151 | #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) | 152 | #define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP) |
152 | 153 | ||
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index baa318a59c97..550725b881d5 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h | |||
@@ -356,16 +356,19 @@ | |||
356 | #define __NR_perf_event_open (__NR_Linux + 333) | 356 | #define __NR_perf_event_open (__NR_Linux + 333) |
357 | #define __NR_accept4 (__NR_Linux + 334) | 357 | #define __NR_accept4 (__NR_Linux + 334) |
358 | #define __NR_recvmmsg (__NR_Linux + 335) | 358 | #define __NR_recvmmsg (__NR_Linux + 335) |
359 | #define __NR_fanotify_init (__NR_Linux + 336) | ||
360 | #define __NR_fanotify_mark (__NR_Linux + 337) | ||
361 | #define __NR_prlimit64 (__NR_Linux + 338) | ||
359 | 362 | ||
360 | /* | 363 | /* |
361 | * Offset of the last Linux o32 flavoured syscall | 364 | * Offset of the last Linux o32 flavoured syscall |
362 | */ | 365 | */ |
363 | #define __NR_Linux_syscalls 335 | 366 | #define __NR_Linux_syscalls 338 |
364 | 367 | ||
365 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | 368 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ |
366 | 369 | ||
367 | #define __NR_O32_Linux 4000 | 370 | #define __NR_O32_Linux 4000 |
368 | #define __NR_O32_Linux_syscalls 335 | 371 | #define __NR_O32_Linux_syscalls 338 |
369 | 372 | ||
370 | #if _MIPS_SIM == _MIPS_SIM_ABI64 | 373 | #if _MIPS_SIM == _MIPS_SIM_ABI64 |
371 | 374 | ||
@@ -668,16 +671,19 @@ | |||
668 | #define __NR_perf_event_open (__NR_Linux + 292) | 671 | #define __NR_perf_event_open (__NR_Linux + 292) |
669 | #define __NR_accept4 (__NR_Linux + 293) | 672 | #define __NR_accept4 (__NR_Linux + 293) |
670 | #define __NR_recvmmsg (__NR_Linux + 294) | 673 | #define __NR_recvmmsg (__NR_Linux + 294) |
674 | #define __NR_fanotify_init (__NR_Linux + 295) | ||
675 | #define __NR_fanotify_mark (__NR_Linux + 296) | ||
676 | #define __NR_prlimit64 (__NR_Linux + 297) | ||
671 | 677 | ||
672 | /* | 678 | /* |
673 | * Offset of the last Linux 64-bit flavoured syscall | 679 | * Offset of the last Linux 64-bit flavoured syscall |
674 | */ | 680 | */ |
675 | #define __NR_Linux_syscalls 294 | 681 | #define __NR_Linux_syscalls 297 |
676 | 682 | ||
677 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ | 683 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ |
678 | 684 | ||
679 | #define __NR_64_Linux 5000 | 685 | #define __NR_64_Linux 5000 |
680 | #define __NR_64_Linux_syscalls 294 | 686 | #define __NR_64_Linux_syscalls 297 |
681 | 687 | ||
682 | #if _MIPS_SIM == _MIPS_SIM_NABI32 | 688 | #if _MIPS_SIM == _MIPS_SIM_NABI32 |
683 | 689 | ||
@@ -985,16 +991,19 @@ | |||
985 | #define __NR_accept4 (__NR_Linux + 297) | 991 | #define __NR_accept4 (__NR_Linux + 297) |
986 | #define __NR_recvmmsg (__NR_Linux + 298) | 992 | #define __NR_recvmmsg (__NR_Linux + 298) |
987 | #define __NR_getdents64 (__NR_Linux + 299) | 993 | #define __NR_getdents64 (__NR_Linux + 299) |
994 | #define __NR_fanotify_init (__NR_Linux + 300) | ||
995 | #define __NR_fanotify_mark (__NR_Linux + 301) | ||
996 | #define __NR_prlimit64 (__NR_Linux + 302) | ||
988 | 997 | ||
989 | /* | 998 | /* |
990 | * Offset of the last N32 flavoured syscall | 999 | * Offset of the last N32 flavoured syscall |
991 | */ | 1000 | */ |
992 | #define __NR_Linux_syscalls 299 | 1001 | #define __NR_Linux_syscalls 302 |
993 | 1002 | ||
994 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ | 1003 | #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ |
995 | 1004 | ||
996 | #define __NR_N32_Linux 6000 | 1005 | #define __NR_N32_Linux 6000 |
997 | #define __NR_N32_Linux_syscalls 299 | 1006 | #define __NR_N32_Linux_syscalls 302 |
998 | 1007 | ||
999 | #ifdef __KERNEL__ | 1008 | #ifdef __KERNEL__ |
1000 | 1009 | ||
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index b181f2f0ea8e..82ba9f62f49e 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <asm/io.h> | 7 | #include <asm/io.h> |
8 | #include <asm/gic.h> | 8 | #include <asm/gic.h> |
9 | #include <asm/gcmpregs.h> | 9 | #include <asm/gcmpregs.h> |
10 | #include <asm/mips-boards/maltaint.h> | ||
11 | #include <asm/irq.h> | 10 | #include <asm/irq.h> |
12 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
13 | #include <asm-generic/bitops/find.h> | 12 | #include <asm-generic/bitops/find.h> |
@@ -131,7 +130,7 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
131 | int i; | 130 | int i; |
132 | 131 | ||
133 | irq -= _irqbase; | 132 | irq -= _irqbase; |
134 | pr_debug(KERN_DEBUG "%s(%d) called\n", __func__, irq); | 133 | pr_debug("%s(%d) called\n", __func__, irq); |
135 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 134 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
136 | if (cpus_empty(tmp)) | 135 | if (cpus_empty(tmp)) |
137 | return -1; | 136 | return -1; |
@@ -222,7 +221,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, | |||
222 | /* Setup specifics */ | 221 | /* Setup specifics */ |
223 | for (i = 0; i < mapsize; i++) { | 222 | for (i = 0; i < mapsize; i++) { |
224 | cpu = intrmap[i].cpunum; | 223 | cpu = intrmap[i].cpunum; |
225 | if (cpu == X) | 224 | if (cpu == GIC_UNUSED) |
226 | continue; | 225 | continue; |
227 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) | 226 | if (cpu == 0 && i != 0 && intrmap[i].flags == 0) |
228 | continue; | 227 | continue; |
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 1f4e2fa64140..f4546e97c60d 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c | |||
@@ -283,7 +283,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, | |||
283 | struct pt_regs *regs = args->regs; | 283 | struct pt_regs *regs = args->regs; |
284 | int trap = (regs->cp0_cause & 0x7c) >> 2; | 284 | int trap = (regs->cp0_cause & 0x7c) >> 2; |
285 | 285 | ||
286 | /* Userpace events, ignore. */ | 286 | /* Userspace events, ignore. */ |
287 | if (user_mode(regs)) | 287 | if (user_mode(regs)) |
288 | return NOTIFY_DONE; | 288 | return NOTIFY_DONE; |
289 | 289 | ||
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index 80e2ba694bab..29811f043399 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c | |||
@@ -251,7 +251,7 @@ void sp_work_handle_request(void) | |||
251 | memset(&tz, 0, sizeof(tz)); | 251 | memset(&tz, 0, sizeof(tz)); |
252 | if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, | 252 | if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv, |
253 | (int)&tz, 0, 0)) == 0) | 253 | (int)&tz, 0, 0)) == 0) |
254 | ret.retval = tv.tv_sec; | 254 | ret.retval = tv.tv_sec; |
255 | break; | 255 | break; |
256 | 256 | ||
257 | case MTSP_SYSCALL_EXIT: | 257 | case MTSP_SYSCALL_EXIT: |
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index c2dab140dc98..6343b4a5b835 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -341,3 +341,10 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, | |||
341 | { | 341 | { |
342 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); | 342 | return sys_lookup_dcookie(merge_64(a0, a1), buf, len); |
343 | } | 343 | } |
344 | |||
345 | SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, | ||
346 | u64, a3, u64, a4, int, dfd, const char __user *, pathname) | ||
347 | { | ||
348 | return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), | ||
349 | dfd, pathname); | ||
350 | } | ||
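[Editor's note] The sys_32_fanotify_mark wrapper added above exists because a 32-bit MIPS task passes the 64-bit event mask as two 32-bit register halves (a3/a4); merge_64() reassembles them before the native sys_fanotify_mark() runs. A minimal sketch of that reassembly — illustrative only, with a hypothetical name and simplified endianness handling, not the in-tree macro:

#include <stdint.h>

/* Illustrative: glue two 32-bit syscall arguments back into the u64 the
 * native handler expects.  Which register of the pair carries the high
 * half depends on the endianness of the 32-bit ABI. */
static inline uint64_t merge_64_sketch(uint32_t first, uint32_t second)
{
#ifdef __MIPSEB__
	return ((uint64_t)first << 32) | second;   /* big endian: high half first */
#else
	return ((uint64_t)second << 32) | first;   /* little endian: low half first */
#endif
}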
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 17202bbe843f..584415eef8c9 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -583,7 +583,10 @@ einval: li v0, -ENOSYS | |||
583 | sys sys_rt_tgsigqueueinfo 4 | 583 | sys sys_rt_tgsigqueueinfo 4 |
584 | sys sys_perf_event_open 5 | 584 | sys sys_perf_event_open 5 |
585 | sys sys_accept4 4 | 585 | sys sys_accept4 4 |
586 | sys sys_recvmmsg 5 | 586 | sys sys_recvmmsg 5 /* 4335 */ |
587 | sys sys_fanotify_init 2 | ||
588 | sys sys_fanotify_mark 6 | ||
589 | sys sys_prlimit64 4 | ||
587 | .endm | 590 | .endm |
588 | 591 | ||
589 | /* We pre-compute the number of _instruction_ bytes needed to | 592 | /* We pre-compute the number of _instruction_ bytes needed to |
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a8a6c596eb04..5573f8e4e326 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S | |||
@@ -416,9 +416,12 @@ sys_call_table: | |||
416 | PTR sys_pipe2 | 416 | PTR sys_pipe2 |
417 | PTR sys_inotify_init1 | 417 | PTR sys_inotify_init1 |
418 | PTR sys_preadv | 418 | PTR sys_preadv |
419 | PTR sys_pwritev /* 5390 */ | 419 | PTR sys_pwritev /* 5290 */ |
420 | PTR sys_rt_tgsigqueueinfo | 420 | PTR sys_rt_tgsigqueueinfo |
421 | PTR sys_perf_event_open | 421 | PTR sys_perf_event_open |
422 | PTR sys_accept4 | 422 | PTR sys_accept4 |
423 | PTR sys_recvmmsg | 423 | PTR sys_recvmmsg |
424 | PTR sys_fanotify_init /* 5295 */ | ||
425 | PTR sys_fanotify_mark | ||
426 | PTR sys_prlimit64 | ||
424 | .size sys_call_table,.-sys_call_table | 427 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index a3d66137731a..1e38ec97672e 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -419,5 +419,8 @@ EXPORT(sysn32_call_table) | |||
419 | PTR sys_perf_event_open | 419 | PTR sys_perf_event_open |
420 | PTR sys_accept4 | 420 | PTR sys_accept4 |
421 | PTR compat_sys_recvmmsg | 421 | PTR compat_sys_recvmmsg |
422 | PTR sys_getdents | 422 | PTR sys_getdents64 |
423 | PTR sys_fanotify_init /* 6300 */ | ||
424 | PTR sys_fanotify_mark | ||
425 | PTR sys_prlimit64 | ||
423 | .size sysn32_call_table,.-sysn32_call_table | 426 | .size sysn32_call_table,.-sysn32_call_table |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 813689ef2384..171979fc98e5 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -538,5 +538,8 @@ sys_call_table: | |||
538 | PTR compat_sys_rt_tgsigqueueinfo | 538 | PTR compat_sys_rt_tgsigqueueinfo |
539 | PTR sys_perf_event_open | 539 | PTR sys_perf_event_open |
540 | PTR sys_accept4 | 540 | PTR sys_accept4 |
541 | PTR compat_sys_recvmmsg | 541 | PTR compat_sys_recvmmsg /* 4335 */ |
542 | PTR sys_fanotify_init | ||
543 | PTR sys_32_fanotify_mark | ||
544 | PTR sys_prlimit64 | ||
542 | .size sys_call_table,.-sys_call_table | 545 | .size sys_call_table,.-sys_call_table |
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 7ba890860d98..469d4019f795 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c | |||
@@ -44,27 +44,39 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev) | |||
44 | 44 | ||
45 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) | 45 | static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) |
46 | { | 46 | { |
47 | gfp_t dma_flag; | ||
48 | |||
47 | /* ignore region specifiers */ | 49 | /* ignore region specifiers */ |
48 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); | 50 | gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); |
49 | 51 | ||
50 | #ifdef CONFIG_ZONE_DMA | 52 | #ifdef CONFIG_ISA |
51 | if (dev == NULL) | 53 | if (dev == NULL) |
52 | gfp |= __GFP_DMA; | 54 | dma_flag = __GFP_DMA; |
53 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(24)) | ||
54 | gfp |= __GFP_DMA; | ||
55 | else | 55 | else |
56 | #endif | 56 | #endif |
57 | #ifdef CONFIG_ZONE_DMA32 | 57 | #if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA) |
58 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) | 58 | if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) |
59 | gfp |= __GFP_DMA32; | 59 | dma_flag = __GFP_DMA; |
60 | else if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
61 | dma_flag = __GFP_DMA32; | ||
62 | else | ||
63 | #endif | ||
64 | #if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA) | ||
65 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
66 | dma_flag = __GFP_DMA32; | ||
67 | else | ||
68 | #endif | ||
69 | #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) | ||
70 | if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) | ||
71 | dma_flag = __GFP_DMA; | ||
60 | else | 72 | else |
61 | #endif | 73 | #endif |
62 | ; | 74 | dma_flag = 0; |
63 | 75 | ||
64 | /* Don't invoke OOM killer */ | 76 | /* Don't invoke OOM killer */ |
65 | gfp |= __GFP_NORETRY; | 77 | gfp |= __GFP_NORETRY; |
66 | 78 | ||
67 | return gfp; | 79 | return gfp | dma_flag; |
68 | } | 80 | } |
69 | 81 | ||
70 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | 82 | void *dma_alloc_noncoherent(struct device *dev, size_t size, |
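[Editor's note] The massage_gfp_flags() rework above stops OR-ing zone modifiers into the caller's mask one #ifdef at a time and instead derives exactly one zone flag from dev->coherent_dma_mask, returning gfp | dma_flag at the end. A simplified sketch of the selection rule when both DMA zones are configured — hypothetical names, not the exact #ifdef ladder:

#include <stdint.h>

enum zone_pick { PICK_NONE, PICK_GFP_DMA, PICK_GFP_DMA32 };

/* Illustrative: a device that cannot address a full 32 bits must allocate
 * from the low DMA zone; one that cannot address a full 64 bits gets the
 * 32-bit zone; otherwise no zone restriction is applied. */
static enum zone_pick pick_dma_zone(uint64_t coherent_dma_mask)
{
	if (coherent_dma_mask < 0xffffffffULL)     /* < DMA_BIT_MASK(32) */
		return PICK_GFP_DMA;
	if (coherent_dma_mask < ~0ULL)             /* < DMA_BIT_MASK(64) */
		return PICK_GFP_DMA32;
	return PICK_NONE;
}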
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 1ef75cd80a0d..274af3be1442 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #define tc_lsize 32 | 30 | #define tc_lsize 32 |
31 | 31 | ||
32 | extern unsigned long icache_way_size, dcache_way_size; | 32 | extern unsigned long icache_way_size, dcache_way_size; |
33 | unsigned long tcache_size; | 33 | static unsigned long tcache_size; |
34 | 34 | ||
35 | #include <asm/r4kcache.h> | 35 | #include <asm/r4kcache.h> |
36 | 36 | ||
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 15949b0be811..b79b24afe3a2 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -385,6 +385,8 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap); | |||
385 | */ | 385 | */ |
386 | 386 | ||
387 | #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK | 387 | #define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK |
388 | #define X GIC_UNUSED | ||
389 | |||
388 | static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { | 390 | static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { |
389 | { X, X, X, X, 0 }, | 391 | { X, X, X, X, 0 }, |
390 | { X, X, X, X, 0 }, | 392 | { X, X, X, X, 0 }, |
@@ -404,6 +406,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { | |||
404 | { X, X, X, X, 0 }, | 406 | { X, X, X, X, 0 }, |
405 | /* The remainder of this table is initialised by fill_ipi_map */ | 407 | /* The remainder of this table is initialised by fill_ipi_map */ |
406 | }; | 408 | }; |
409 | #undef X | ||
407 | 410 | ||
408 | /* | 411 | /* |
409 | * GCMP needs to be detected before any SMP initialisation | 412 | * GCMP needs to be detected before any SMP initialisation |
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 71f7d27b0d4c..f31218e17d3c 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c | |||
@@ -118,7 +118,7 @@ static int __init rc32434_pcibridge_init(void) | |||
118 | if (!((pcicvalue == PCIM_H_EA) || | 118 | if (!((pcicvalue == PCIM_H_EA) || |
119 | (pcicvalue == PCIM_H_IA_FIX) || | 119 | (pcicvalue == PCIM_H_IA_FIX) || |
120 | (pcicvalue == PCIM_H_IA_RR))) { | 120 | (pcicvalue == PCIM_H_IA_RR))) { |
121 | pr_err(KERN_ERR "PCI init error!!!\n"); | 121 | pr_err("PCI init error!!!\n"); |
122 | /* Not in Host Mode, return ERROR */ | 122 | /* Not in Host Mode, return ERROR */ |
123 | return -1; | 123 | return -1; |
124 | } | 124 | } |
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c index fadd8744a6bc..e7a12ff304b9 100644 --- a/arch/mips/pnx8550/common/reset.c +++ b/arch/mips/pnx8550/common/reset.c | |||
@@ -22,29 +22,19 @@ | |||
22 | */ | 22 | */ |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | 24 | ||
25 | #include <asm/processor.h> | ||
25 | #include <asm/reboot.h> | 26 | #include <asm/reboot.h> |
26 | #include <glb.h> | 27 | #include <glb.h> |
27 | 28 | ||
28 | void pnx8550_machine_restart(char *command) | 29 | void pnx8550_machine_restart(char *command) |
29 | { | 30 | { |
30 | char head[] = "************* Machine restart *************"; | ||
31 | char foot[] = "*******************************************"; | ||
32 | |||
33 | printk("\n\n"); | ||
34 | printk("%s\n", head); | ||
35 | if (command != NULL) | ||
36 | printk("* %s\n", command); | ||
37 | printk("%s\n", foot); | ||
38 | |||
39 | PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; | 31 | PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST; |
40 | } | 32 | } |
41 | 33 | ||
42 | void pnx8550_machine_halt(void) | 34 | void pnx8550_machine_halt(void) |
43 | { | 35 | { |
44 | printk("*** Machine halt. (Not implemented) ***\n"); | 36 | while (1) { |
45 | } | 37 | if (cpu_wait) |
46 | 38 | cpu_wait(); | |
47 | void pnx8550_machine_power_off(void) | 39 | } |
48 | { | ||
49 | printk("*** Machine power off. (Not implemented) ***\n"); | ||
50 | } | 40 | } |
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c index 64246c9c875c..43cb3945fdbf 100644 --- a/arch/mips/pnx8550/common/setup.c +++ b/arch/mips/pnx8550/common/setup.c | |||
@@ -44,7 +44,6 @@ | |||
44 | extern void __init board_setup(void); | 44 | extern void __init board_setup(void); |
45 | extern void pnx8550_machine_restart(char *); | 45 | extern void pnx8550_machine_restart(char *); |
46 | extern void pnx8550_machine_halt(void); | 46 | extern void pnx8550_machine_halt(void); |
47 | extern void pnx8550_machine_power_off(void); | ||
48 | extern struct resource ioport_resource; | 47 | extern struct resource ioport_resource; |
49 | extern struct resource iomem_resource; | 48 | extern struct resource iomem_resource; |
50 | extern char *prom_getcmdline(void); | 49 | extern char *prom_getcmdline(void); |
@@ -100,7 +99,7 @@ void __init plat_mem_setup(void) | |||
100 | 99 | ||
101 | _machine_restart = pnx8550_machine_restart; | 100 | _machine_restart = pnx8550_machine_restart; |
102 | _machine_halt = pnx8550_machine_halt; | 101 | _machine_halt = pnx8550_machine_halt; |
103 | pm_power_off = pnx8550_machine_power_off; | 102 | pm_power_off = pnx8550_machine_halt; |
104 | 103 | ||
105 | /* Clear the Global 2 Register, PCI Inta Output Enable Registers | 104 | /* Clear the Global 2 Register, PCI Inta Output Enable Registers |
106 | Bit 1:Enable DAC Powerdown | 105 | Bit 1:Enable DAC Powerdown |
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 444b9f918fdf..7c2a2f7f8dc1 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration" | |||
8 | config MN10300 | 8 | config MN10300 |
9 | def_bool y | 9 | def_bool y |
10 | select HAVE_OPROFILE | 10 | select HAVE_OPROFILE |
11 | select HAVE_ARCH_TRACEHOOK | ||
12 | 11 | ||
13 | config AM33 | 12 | config AM33 |
14 | def_bool y | 13 | def_bool y |
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug index ff80e86b9bd2..ce83c74b3fd7 100644 --- a/arch/mn10300/Kconfig.debug +++ b/arch/mn10300/Kconfig.debug | |||
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT | |||
101 | 101 | ||
102 | choice | 102 | choice |
103 | prompt "GDB stub port" | 103 | prompt "GDB stub port" |
104 | default GDBSTUB_TTYSM0 | 104 | default GDBSTUB_ON_TTYSM0 |
105 | depends on GDBSTUB | 105 | depends on GDBSTUB |
106 | help | 106 | help |
107 | Select the serial port used for GDB-stub. | 107 | Select the serial port used for GDB-stub. |
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h index f49ac49e09ad..3f50e9661076 100644 --- a/arch/mn10300/include/asm/bitops.h +++ b/arch/mn10300/include/asm/bitops.h | |||
@@ -229,9 +229,9 @@ int ffs(int x) | |||
229 | #include <asm-generic/bitops/hweight.h> | 229 | #include <asm-generic/bitops/hweight.h> |
230 | 230 | ||
231 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 231 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
232 | test_and_set_bit((nr) ^ 0x18, (addr)) | 232 | test_and_set_bit((nr), (addr)) |
233 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 233 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
234 | test_and_clear_bit((nr) ^ 0x18, (addr)) | 234 | test_and_clear_bit((nr), (addr)) |
235 | 235 | ||
236 | #include <asm-generic/bitops/ext2-non-atomic.h> | 236 | #include <asm-generic/bitops/ext2-non-atomic.h> |
237 | #include <asm-generic/bitops/minix-le.h> | 237 | #include <asm-generic/bitops/minix-le.h> |
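[Editor's note] The bitops.h change above drops the "^ 0x18" from ext2_set_bit_atomic()/ext2_clear_bit_atomic(), so ext2 bitmap bits are now addressed with native bit numbering. XOR-ing a bit index with 0x18 leaves its position inside a byte alone but reverses the order of the four bytes within a 32-bit word, i.e. it translates between the two byte-order conventions for on-disk bitmaps. A small, self-contained illustration of what the old transform did:

#include <stdio.h>

/* Illustrative: for bit indices 0..31, "bit ^ 0x18" keeps bit % 8 unchanged
 * and maps byte 0 <-> 3 and byte 1 <-> 2 within the 32-bit word. */
int main(void)
{
	for (unsigned int bit = 0; bit < 32; bit += 8)
		printf("bit %2u: byte %u -> byte %u after ^0x18\n",
		       bit, bit / 8, (bit ^ 0x18) / 8);
	return 0;
}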
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h index 7e891fce2370..1865d72a86ff 100644 --- a/arch/mn10300/include/asm/signal.h +++ b/arch/mn10300/include/asm/signal.h | |||
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t; | |||
78 | 78 | ||
79 | /* These should not be considered constants from userland. */ | 79 | /* These should not be considered constants from userland. */ |
80 | #define SIGRTMIN 32 | 80 | #define SIGRTMIN 32 |
81 | #define SIGRTMAX (_NSIG-1) | 81 | #define SIGRTMAX _NSIG |
82 | 82 | ||
83 | /* | 83 | /* |
84 | * SA_FLAGS values: | 84 | * SA_FLAGS values: |
diff --git a/arch/mn10300/kernel/module.c b/arch/mn10300/kernel/module.c index 6aea7fd76993..196a111e2e29 100644 --- a/arch/mn10300/kernel/module.c +++ b/arch/mn10300/kernel/module.c | |||
@@ -206,7 +206,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
206 | const Elf_Shdr *sechdrs, | 206 | const Elf_Shdr *sechdrs, |
207 | struct module *me) | 207 | struct module *me) |
208 | { | 208 | { |
209 | return module_bug_finalize(hdr, sechdrs, me); | 209 | return 0; |
210 | } | 210 | } |
211 | 211 | ||
212 | /* | 212 | /* |
@@ -214,5 +214,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
214 | */ | 214 | */ |
215 | void module_arch_cleanup(struct module *mod) | 215 | void module_arch_cleanup(struct module *mod) |
216 | { | 216 | { |
217 | module_bug_cleanup(mod); | ||
218 | } | 217 | } |
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index 717db14c2cc3..d4de05ab7864 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c | |||
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig, | |||
65 | old_sigset_t mask; | 65 | old_sigset_t mask; |
66 | if (verify_area(VERIFY_READ, act, sizeof(*act)) || | 66 | if (verify_area(VERIFY_READ, act, sizeof(*act)) || |
67 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 67 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
68 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | 68 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || |
69 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
70 | __get_user(mask, &act->sa_mask)) | ||
69 | return -EFAULT; | 71 | return -EFAULT; |
70 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
71 | __get_user(mask, &act->sa_mask); | ||
72 | siginitset(&new_ka.sa.sa_mask, mask); | 72 | siginitset(&new_ka.sa.sa_mask, mask); |
73 | } | 73 | } |
74 | 74 | ||
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig, | |||
77 | if (!ret && oact) { | 77 | if (!ret && oact) { |
78 | if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || | 78 | if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || |
79 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 79 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
80 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | 80 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || |
81 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
82 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
81 | return -EFAULT; | 83 | return -EFAULT; |
82 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
83 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
84 | } | 84 | } |
85 | 85 | ||
86 | return ret; | 86 | return ret; |
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs, | |||
102 | { | 102 | { |
103 | unsigned int err = 0; | 103 | unsigned int err = 0; |
104 | 104 | ||
105 | /* Always make any pending restarted system calls return -EINTR */ | ||
106 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
107 | |||
105 | if (is_using_fpu(current)) | 108 | if (is_using_fpu(current)) |
106 | fpu_kill_state(current); | 109 | fpu_kill_state(current); |
107 | 110 | ||
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
330 | regs->d0 = sig; | 333 | regs->d0 = sig; |
331 | regs->d1 = (unsigned long) &frame->sc; | 334 | regs->d1 = (unsigned long) &frame->sc; |
332 | 335 | ||
333 | set_fs(USER_DS); | ||
334 | |||
335 | /* the tracer may want to single-step inside the handler */ | 336 | /* the tracer may want to single-step inside the handler */ |
336 | if (test_thread_flag(TIF_SINGLESTEP)) | 337 | if (test_thread_flag(TIF_SINGLESTEP)) |
337 | ptrace_notify(SIGTRAP); | 338 | ptrace_notify(SIGTRAP); |
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, | |||
345 | return 0; | 346 | return 0; |
346 | 347 | ||
347 | give_sigsegv: | 348 | give_sigsegv: |
348 | force_sig(SIGSEGV, current); | 349 | force_sigsegv(sig, current); |
349 | return -EFAULT; | 350 | return -EFAULT; |
350 | } | 351 | } |
351 | 352 | ||
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
413 | regs->d0 = sig; | 414 | regs->d0 = sig; |
414 | regs->d1 = (long) &frame->info; | 415 | regs->d1 = (long) &frame->info; |
415 | 416 | ||
416 | set_fs(USER_DS); | ||
417 | |||
418 | /* the tracer may want to single-step inside the handler */ | 417 | /* the tracer may want to single-step inside the handler */ |
419 | if (test_thread_flag(TIF_SINGLESTEP)) | 418 | if (test_thread_flag(TIF_SINGLESTEP)) |
420 | ptrace_notify(SIGTRAP); | 419 | ptrace_notify(SIGTRAP); |
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
428 | return 0; | 427 | return 0; |
429 | 428 | ||
430 | give_sigsegv: | 429 | give_sigsegv: |
431 | force_sig(SIGSEGV, current); | 430 | force_sigsegv(sig, current); |
432 | return -EFAULT; | 431 | return -EFAULT; |
433 | } | 432 | } |
434 | 433 | ||
434 | static inline void stepback(struct pt_regs *regs) | ||
435 | { | ||
436 | regs->pc -= 2; | ||
437 | regs->orig_d0 = -1; | ||
438 | } | ||
439 | |||
435 | /* | 440 | /* |
436 | * handle the actual delivery of a signal to userspace | 441 | * handle the actual delivery of a signal to userspace |
437 | */ | 442 | */ |
@@ -459,7 +464,7 @@ static int handle_signal(int sig, | |||
459 | /* fallthrough */ | 464 | /* fallthrough */ |
460 | case -ERESTARTNOINTR: | 465 | case -ERESTARTNOINTR: |
461 | regs->d0 = regs->orig_d0; | 466 | regs->d0 = regs->orig_d0; |
462 | regs->pc -= 2; | 467 | stepback(regs); |
463 | } | 468 | } |
464 | } | 469 | } |
465 | 470 | ||
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs) | |||
527 | case -ERESTARTSYS: | 532 | case -ERESTARTSYS: |
528 | case -ERESTARTNOINTR: | 533 | case -ERESTARTNOINTR: |
529 | regs->d0 = regs->orig_d0; | 534 | regs->d0 = regs->orig_d0; |
530 | regs->pc -= 2; | 535 | stepback(regs); |
531 | break; | 536 | break; |
532 | 537 | ||
533 | case -ERESTART_RESTARTBLOCK: | 538 | case -ERESTART_RESTARTBLOCK: |
534 | regs->d0 = __NR_restart_syscall; | 539 | regs->d0 = __NR_restart_syscall; |
535 | regs->pc -= 2; | 540 | stepback(regs); |
536 | break; | 541 | break; |
537 | } | 542 | } |
538 | } | 543 | } |
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile index 28b9d983db0c..1557277fbc5c 100644 --- a/arch/mn10300/mm/Makefile +++ b/arch/mn10300/mm/Makefile | |||
@@ -2,13 +2,11 @@ | |||
2 | # Makefile for the MN10300-specific memory management code | 2 | # Makefile for the MN10300-specific memory management code |
3 | # | 3 | # |
4 | 4 | ||
5 | cacheflush-y := cache.o cache-mn10300.o | ||
6 | cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o | ||
7 | |||
8 | cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o | ||
9 | |||
5 | obj-y := \ | 10 | obj-y := \ |
6 | init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ | 11 | init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ |
7 | misalignment.o dma-alloc.o | 12 | misalignment.o dma-alloc.o $(cacheflush-y) |
8 | |||
9 | ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y) | ||
10 | obj-y += cache.o cache-mn10300.o | ||
11 | ifeq ($(CONFIG_MN10300_CACHE_WBACK),y) | ||
12 | obj-y += cache-flush-mn10300.o | ||
13 | endif | ||
14 | endif | ||
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c new file mode 100644 index 000000000000..f669ea42aba6 --- /dev/null +++ b/arch/mn10300/mm/cache-disabled.c | |||
@@ -0,0 +1,21 @@ | |||
1 | /* Handle the cache being disabled | ||
2 | * | ||
3 | * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/mm.h> | ||
12 | |||
13 | /* | ||
14 | * allow userspace to flush the instruction cache | ||
15 | */ | ||
16 | asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) | ||
17 | { | ||
18 | if (end < start) | ||
19 | return -EINVAL; | ||
20 | return 0; | ||
21 | } | ||
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c index 1b76719ec1c3..9261217e8d2c 100644 --- a/arch/mn10300/mm/cache.c +++ b/arch/mn10300/mm/cache.c | |||
@@ -54,13 +54,30 @@ EXPORT_SYMBOL(flush_icache_page); | |||
54 | void flush_icache_range(unsigned long start, unsigned long end) | 54 | void flush_icache_range(unsigned long start, unsigned long end) |
55 | { | 55 | { |
56 | #ifdef CONFIG_MN10300_CACHE_WBACK | 56 | #ifdef CONFIG_MN10300_CACHE_WBACK |
57 | unsigned long addr, size, off; | 57 | unsigned long addr, size, base, off; |
58 | struct page *page; | 58 | struct page *page; |
59 | pgd_t *pgd; | 59 | pgd_t *pgd; |
60 | pud_t *pud; | 60 | pud_t *pud; |
61 | pmd_t *pmd; | 61 | pmd_t *pmd; |
62 | pte_t *ppte, pte; | 62 | pte_t *ppte, pte; |
63 | 63 | ||
64 | if (end > 0x80000000UL) { | ||
65 | /* addresses above 0xa0000000 do not go through the cache */ | ||
66 | if (end > 0xa0000000UL) { | ||
67 | end = 0xa0000000UL; | ||
68 | if (start >= end) | ||
69 | return; | ||
70 | } | ||
71 | |||
72 | /* kernel addresses between 0x80000000 and 0x9fffffff do not | ||
73 | * require page tables, so we just map such addresses directly */ | ||
74 | base = (start >= 0x80000000UL) ? start : 0x80000000UL; | ||
75 | mn10300_dcache_flush_range(base, end); | ||
76 | if (base == start) | ||
77 | goto invalidate; | ||
78 | end = base; | ||
79 | } | ||
80 | |||
64 | for (; start < end; start += size) { | 81 | for (; start < end; start += size) { |
65 | /* work out how much of the page to flush */ | 82 | /* work out how much of the page to flush */ |
66 | off = start & (PAGE_SIZE - 1); | 83 | off = start & (PAGE_SIZE - 1); |
@@ -104,6 +121,7 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
104 | } | 121 | } |
105 | #endif | 122 | #endif |
106 | 123 | ||
124 | invalidate: | ||
107 | mn10300_icache_inv(); | 125 | mn10300_icache_inv(); |
108 | } | 126 | } |
109 | EXPORT_SYMBOL(flush_icache_range); | 127 | EXPORT_SYMBOL(flush_icache_range); |
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 159a2b81e90c..6e81bb596e5b 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
@@ -941,11 +941,10 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
941 | nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; | 941 | nsyms = newptr - (Elf_Sym *)symhdr->sh_addr; |
942 | DEBUGP("NEW num_symtab %lu\n", nsyms); | 942 | DEBUGP("NEW num_symtab %lu\n", nsyms); |
943 | symhdr->sh_size = nsyms * sizeof(Elf_Sym); | 943 | symhdr->sh_size = nsyms * sizeof(Elf_Sym); |
944 | return module_bug_finalize(hdr, sechdrs, me); | 944 | return 0; |
945 | } | 945 | } |
946 | 946 | ||
947 | void module_arch_cleanup(struct module *mod) | 947 | void module_arch_cleanup(struct module *mod) |
948 | { | 948 | { |
949 | deregister_unwind_table(mod); | 949 | deregister_unwind_table(mod); |
950 | module_bug_cleanup(mod); | ||
951 | } | 950 | } |
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 477c663e0140..49cee9df225b 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c | |||
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
63 | const Elf_Shdr *sechdrs, struct module *me) | 63 | const Elf_Shdr *sechdrs, struct module *me) |
64 | { | 64 | { |
65 | const Elf_Shdr *sect; | 65 | const Elf_Shdr *sect; |
66 | int err; | ||
67 | |||
68 | err = module_bug_finalize(hdr, sechdrs, me); | ||
69 | if (err) | ||
70 | return err; | ||
71 | 66 | ||
72 | /* Apply feature fixups */ | 67 | /* Apply feature fixups */ |
73 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); | 68 | sect = find_section(hdr, sechdrs, "__ftr_fixup"); |
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
101 | 96 | ||
102 | void module_arch_cleanup(struct module *mod) | 97 | void module_arch_cleanup(struct module *mod) |
103 | { | 98 | { |
104 | module_bug_cleanup(mod); | ||
105 | } | 99 | } |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index 95ad9dad298e..d05ae4204bbf 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -23,18 +23,6 @@ | |||
23 | #include "ppc32.h" | 23 | #include "ppc32.h" |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | 26 | ||
39 | /* | 27 | /* |
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | 28 | * Is sp valid as the address of the next kernel stack frame after prev_sp? |
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | |||
58 | return 0; | 46 | return 0; |
59 | } | 47 | } |
60 | 48 | ||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | 49 | void |
62 | struct perf_callchain_entry *entry) | 50 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) |
63 | { | 51 | { |
64 | unsigned long sp, next_sp; | 52 | unsigned long sp, next_sp; |
65 | unsigned long next_ip; | 53 | unsigned long next_ip; |
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
69 | 57 | ||
70 | lr = regs->link; | 58 | lr = regs->link; |
71 | sp = regs->gpr[1]; | 59 | sp = regs->gpr[1]; |
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 60 | perf_callchain_store(entry, regs->nip); |
73 | callchain_store(entry, regs->nip); | ||
74 | 61 | ||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | 62 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) |
76 | return; | 63 | return; |
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
89 | next_ip = regs->nip; | 76 | next_ip = regs->nip; |
90 | lr = regs->link; | 77 | lr = regs->link; |
91 | level = 0; | 78 | level = 0; |
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 79 | perf_callchain_store(entry, PERF_CONTEXT_KERNEL); |
93 | 80 | ||
94 | } else { | 81 | } else { |
95 | if (level == 0) | 82 | if (level == 0) |
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
111 | ++level; | 98 | ++level; |
112 | } | 99 | } |
113 | 100 | ||
114 | callchain_store(entry, next_ip); | 101 | perf_callchain_store(entry, next_ip); |
115 | if (!valid_next_sp(next_sp, sp)) | 102 | if (!valid_next_sp(next_sp, sp)) |
116 | return; | 103 | return; |
117 | sp = next_sp; | 104 | sp = next_sp; |
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp) | |||
233 | puc == (unsigned long) &sf->uc; | 220 | puc == (unsigned long) &sf->uc; |
234 | } | 221 | } |
235 | 222 | ||
236 | static void perf_callchain_user_64(struct pt_regs *regs, | 223 | static void perf_callchain_user_64(struct perf_callchain_entry *entry, |
237 | struct perf_callchain_entry *entry) | 224 | struct pt_regs *regs) |
238 | { | 225 | { |
239 | unsigned long sp, next_sp; | 226 | unsigned long sp, next_sp; |
240 | unsigned long next_ip; | 227 | unsigned long next_ip; |
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
246 | next_ip = regs->nip; | 233 | next_ip = regs->nip; |
247 | lr = regs->link; | 234 | lr = regs->link; |
248 | sp = regs->gpr[1]; | 235 | sp = regs->gpr[1]; |
249 | callchain_store(entry, PERF_CONTEXT_USER); | 236 | perf_callchain_store(entry, next_ip); |
250 | callchain_store(entry, next_ip); | ||
251 | 237 | ||
252 | for (;;) { | 238 | for (;;) { |
253 | fp = (unsigned long __user *) sp; | 239 | fp = (unsigned long __user *) sp; |
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
276 | read_user_stack_64(&uregs[PT_R1], &sp)) | 262 | read_user_stack_64(&uregs[PT_R1], &sp)) |
277 | return; | 263 | return; |
278 | level = 0; | 264 | level = 0; |
279 | callchain_store(entry, PERF_CONTEXT_USER); | 265 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
280 | callchain_store(entry, next_ip); | 266 | perf_callchain_store(entry, next_ip); |
281 | continue; | 267 | continue; |
282 | } | 268 | } |
283 | 269 | ||
284 | if (level == 0) | 270 | if (level == 0) |
285 | next_ip = lr; | 271 | next_ip = lr; |
286 | callchain_store(entry, next_ip); | 272 | perf_callchain_store(entry, next_ip); |
287 | ++level; | 273 | ++level; |
288 | sp = next_sp; | 274 | sp = next_sp; |
289 | } | 275 | } |
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | |||
315 | return __get_user_inatomic(*ret, ptr); | 301 | return __get_user_inatomic(*ret, ptr); |
316 | } | 302 | } |
317 | 303 | ||
318 | static inline void perf_callchain_user_64(struct pt_regs *regs, | 304 | static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, |
319 | struct perf_callchain_entry *entry) | 305 | struct pt_regs *regs) |
320 | { | 306 | { |
321 | } | 307 | } |
322 | 308 | ||
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp, | |||
435 | return mctx->mc_gregs; | 421 | return mctx->mc_gregs; |
436 | } | 422 | } |
437 | 423 | ||
438 | static void perf_callchain_user_32(struct pt_regs *regs, | 424 | static void perf_callchain_user_32(struct perf_callchain_entry *entry, |
439 | struct perf_callchain_entry *entry) | 425 | struct pt_regs *regs) |
440 | { | 426 | { |
441 | unsigned int sp, next_sp; | 427 | unsigned int sp, next_sp; |
442 | unsigned int next_ip; | 428 | unsigned int next_ip; |
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
447 | next_ip = regs->nip; | 433 | next_ip = regs->nip; |
448 | lr = regs->link; | 434 | lr = regs->link; |
449 | sp = regs->gpr[1]; | 435 | sp = regs->gpr[1]; |
450 | callchain_store(entry, PERF_CONTEXT_USER); | 436 | perf_callchain_store(entry, next_ip); |
451 | callchain_store(entry, next_ip); | ||
452 | 437 | ||
453 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | 438 | while (entry->nr < PERF_MAX_STACK_DEPTH) { |
454 | fp = (unsigned int __user *) (unsigned long) sp; | 439 | fp = (unsigned int __user *) (unsigned long) sp; |
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
470 | read_user_stack_32(&uregs[PT_R1], &sp)) | 455 | read_user_stack_32(&uregs[PT_R1], &sp)) |
471 | return; | 456 | return; |
472 | level = 0; | 457 | level = 0; |
473 | callchain_store(entry, PERF_CONTEXT_USER); | 458 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
474 | callchain_store(entry, next_ip); | 459 | perf_callchain_store(entry, next_ip); |
475 | continue; | 460 | continue; |
476 | } | 461 | } |
477 | 462 | ||
478 | if (level == 0) | 463 | if (level == 0) |
479 | next_ip = lr; | 464 | next_ip = lr; |
480 | callchain_store(entry, next_ip); | 465 | perf_callchain_store(entry, next_ip); |
481 | ++level; | 466 | ++level; |
482 | sp = next_sp; | 467 | sp = next_sp; |
483 | } | 468 | } |
484 | } | 469 | } |
485 | 470 | ||
486 | /* | 471 | void |
487 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | 472 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
488 | * we don't need separate irq and nmi entries here. | ||
489 | */ | ||
490 | static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain); | ||
491 | |||
492 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
493 | { | 473 | { |
494 | struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain); | 474 | if (current_is_64bit()) |
495 | 475 | perf_callchain_user_64(entry, regs); | |
496 | entry->nr = 0; | 476 | else |
497 | 477 | perf_callchain_user_32(entry, regs); | |
498 | if (!user_mode(regs)) { | ||
499 | perf_callchain_kernel(regs, entry); | ||
500 | if (current->mm) | ||
501 | regs = task_pt_regs(current); | ||
502 | else | ||
503 | regs = NULL; | ||
504 | } | ||
505 | |||
506 | if (regs) { | ||
507 | if (current_is_64bit()) | ||
508 | perf_callchain_user_64(regs, entry); | ||
509 | else | ||
510 | perf_callchain_user_32(regs, entry); | ||
511 | } | ||
512 | |||
513 | return entry; | ||
514 | } | 478 | } |
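[Editor's note] The perf_callchain.c changes above remove the arch-private callchain_store() helper and the per-CPU entry buffer: the core perf code now owns the buffer, calls perf_callchain_kernel()/perf_callchain_user() directly, and the arch records frames through the generic perf_callchain_store(). For orientation, both the removed helper and its generic replacement amount to a bounded append — a simplified, self-contained sketch with hypothetical type names and a fixed depth:

/* Illustrative: append an instruction pointer to a callchain, dropping
 * further frames once the fixed depth limit is reached. */
#define SKETCH_MAX_STACK_DEPTH 127

struct callchain_sketch {
	unsigned int nr;
	unsigned long ip[SKETCH_MAX_STACK_DEPTH];
};

static void callchain_store_sketch(struct callchain_sketch *entry, unsigned long ip)
{
	if (entry->nr < SKETCH_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}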
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index d301a30445e0..9cb4924b6c07 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event) | |||
402 | { | 402 | { |
403 | s64 val, delta, prev; | 403 | s64 val, delta, prev; |
404 | 404 | ||
405 | if (event->hw.state & PERF_HES_STOPPED) | ||
406 | return; | ||
407 | |||
405 | if (!event->hw.idx) | 408 | if (!event->hw.idx) |
406 | return; | 409 | return; |
407 | /* | 410 | /* |
@@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) | |||
517 | * Disable all events to prevent PMU interrupts and to allow | 520 | * Disable all events to prevent PMU interrupts and to allow |
518 | * events to be added or removed. | 521 | * events to be added or removed. |
519 | */ | 522 | */ |
520 | void hw_perf_disable(void) | 523 | static void power_pmu_disable(struct pmu *pmu) |
521 | { | 524 | { |
522 | struct cpu_hw_events *cpuhw; | 525 | struct cpu_hw_events *cpuhw; |
523 | unsigned long flags; | 526 | unsigned long flags; |
@@ -565,7 +568,7 @@ void hw_perf_disable(void) | |||
565 | * If we were previously disabled and events were added, then | 568 | * If we were previously disabled and events were added, then |
566 | * put the new config on the PMU. | 569 | * put the new config on the PMU. |
567 | */ | 570 | */ |
568 | void hw_perf_enable(void) | 571 | static void power_pmu_enable(struct pmu *pmu) |
569 | { | 572 | { |
570 | struct perf_event *event; | 573 | struct perf_event *event; |
571 | struct cpu_hw_events *cpuhw; | 574 | struct cpu_hw_events *cpuhw; |
@@ -672,6 +675,8 @@ void hw_perf_enable(void) | |||
672 | } | 675 | } |
673 | local64_set(&event->hw.prev_count, val); | 676 | local64_set(&event->hw.prev_count, val); |
674 | event->hw.idx = idx; | 677 | event->hw.idx = idx; |
678 | if (event->hw.state & PERF_HES_STOPPED) | ||
679 | val = 0; | ||
675 | write_pmc(idx, val); | 680 | write_pmc(idx, val); |
676 | perf_event_update_userpage(event); | 681 | perf_event_update_userpage(event); |
677 | } | 682 | } |
@@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count, | |||
727 | * re-enable the PMU in order to get hw_perf_enable to do the | 732 | * re-enable the PMU in order to get hw_perf_enable to do the |
728 | * actual work of reconfiguring the PMU. | 733 | * actual work of reconfiguring the PMU. |
729 | */ | 734 | */ |
730 | static int power_pmu_enable(struct perf_event *event) | 735 | static int power_pmu_add(struct perf_event *event, int ef_flags) |
731 | { | 736 | { |
732 | struct cpu_hw_events *cpuhw; | 737 | struct cpu_hw_events *cpuhw; |
733 | unsigned long flags; | 738 | unsigned long flags; |
@@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event) | |||
735 | int ret = -EAGAIN; | 740 | int ret = -EAGAIN; |
736 | 741 | ||
737 | local_irq_save(flags); | 742 | local_irq_save(flags); |
738 | perf_disable(); | 743 | perf_pmu_disable(event->pmu); |
739 | 744 | ||
740 | /* | 745 | /* |
741 | * Add the event to the list (if there is room) | 746 | * Add the event to the list (if there is room) |
@@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event) | |||
749 | cpuhw->events[n0] = event->hw.config; | 754 | cpuhw->events[n0] = event->hw.config; |
750 | cpuhw->flags[n0] = event->hw.event_base; | 755 | cpuhw->flags[n0] = event->hw.event_base; |
751 | 756 | ||
757 | if (!(ef_flags & PERF_EF_START)) | ||
758 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
759 | |||
752 | /* | 760 | /* |
753 | * If group events scheduling transaction was started, | 761 | * If group events scheduling transaction was started, |
754 | * skip the schedulability test here, it will be peformed | 762 | * skip the schedulability test here, it will be peformed |
@@ -769,7 +777,7 @@ nocheck: | |||
769 | 777 | ||
770 | ret = 0; | 778 | ret = 0; |
771 | out: | 779 | out: |
772 | perf_enable(); | 780 | perf_pmu_enable(event->pmu); |
773 | local_irq_restore(flags); | 781 | local_irq_restore(flags); |
774 | return ret; | 782 | return ret; |
775 | } | 783 | } |
@@ -777,14 +785,14 @@ nocheck: | |||
777 | /* | 785 | /* |
778 | * Remove a event from the PMU. | 786 | * Remove a event from the PMU. |
779 | */ | 787 | */ |
780 | static void power_pmu_disable(struct perf_event *event) | 788 | static void power_pmu_del(struct perf_event *event, int ef_flags) |
781 | { | 789 | { |
782 | struct cpu_hw_events *cpuhw; | 790 | struct cpu_hw_events *cpuhw; |
783 | long i; | 791 | long i; |
784 | unsigned long flags; | 792 | unsigned long flags; |
785 | 793 | ||
786 | local_irq_save(flags); | 794 | local_irq_save(flags); |
787 | perf_disable(); | 795 | perf_pmu_disable(event->pmu); |
788 | 796 | ||
789 | power_pmu_read(event); | 797 | power_pmu_read(event); |
790 | 798 | ||
@@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event) | |||
821 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | 829 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); |
822 | } | 830 | } |
823 | 831 | ||
824 | perf_enable(); | 832 | perf_pmu_enable(event->pmu); |
825 | local_irq_restore(flags); | 833 | local_irq_restore(flags); |
826 | } | 834 | } |
827 | 835 | ||
828 | /* | 836 | /* |
829 | * Re-enable interrupts on a event after they were throttled | 837 | * POWER-PMU does not support disabling individual counters, hence |
830 | * because they were coming too fast. | 838 | * program their cycle counter to their max value and ignore the interrupts. |
831 | */ | 839 | */ |
832 | static void power_pmu_unthrottle(struct perf_event *event) | 840 | |
841 | static void power_pmu_start(struct perf_event *event, int ef_flags) | ||
842 | { | ||
843 | unsigned long flags; | ||
844 | s64 left; | ||
845 | |||
846 | if (!event->hw.idx || !event->hw.sample_period) | ||
847 | return; | ||
848 | |||
849 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
850 | return; | ||
851 | |||
852 | if (ef_flags & PERF_EF_RELOAD) | ||
853 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
854 | |||
855 | local_irq_save(flags); | ||
856 | perf_pmu_disable(event->pmu); | ||
857 | |||
858 | event->hw.state = 0; | ||
859 | left = local64_read(&event->hw.period_left); | ||
860 | write_pmc(event->hw.idx, left); | ||
861 | |||
862 | perf_event_update_userpage(event); | ||
863 | perf_pmu_enable(event->pmu); | ||
864 | local_irq_restore(flags); | ||
865 | } | ||
866 | |||
867 | static void power_pmu_stop(struct perf_event *event, int ef_flags) | ||
833 | { | 868 | { |
834 | s64 val, left; | ||
835 | unsigned long flags; | 869 | unsigned long flags; |
836 | 870 | ||
837 | if (!event->hw.idx || !event->hw.sample_period) | 871 | if (!event->hw.idx || !event->hw.sample_period) |
838 | return; | 872 | return; |
873 | |||
874 | if (event->hw.state & PERF_HES_STOPPED) | ||
875 | return; | ||
876 | |||
839 | local_irq_save(flags); | 877 | local_irq_save(flags); |
840 | perf_disable(); | 878 | perf_pmu_disable(event->pmu); |
879 | |||
841 | power_pmu_read(event); | 880 | power_pmu_read(event); |
842 | left = event->hw.sample_period; | 881 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
843 | event->hw.last_period = left; | 882 | write_pmc(event->hw.idx, 0); |
844 | val = 0; | 883 | |
845 | if (left < 0x80000000L) | ||
846 | val = 0x80000000L - left; | ||
847 | write_pmc(event->hw.idx, val); | ||
848 | local64_set(&event->hw.prev_count, val); | ||
849 | local64_set(&event->hw.period_left, left); | ||
850 | perf_event_update_userpage(event); | 884 | perf_event_update_userpage(event); |
851 | perf_enable(); | 885 | perf_pmu_enable(event->pmu); |
852 | local_irq_restore(flags); | 886 | local_irq_restore(flags); |
853 | } | 887 | } |
854 | 888 | ||
@@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event) | |||
857 | * Set the flag to make pmu::enable() not perform the | 891 | * Set the flag to make pmu::enable() not perform the |
858 | * schedulability test, it will be performed at commit time | 892 | * schedulability test, it will be performed at commit time |
859 | */ | 893 | */ |
860 | void power_pmu_start_txn(const struct pmu *pmu) | 894 | void power_pmu_start_txn(struct pmu *pmu) |
861 | { | 895 | { |
862 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 896 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
863 | 897 | ||
898 | perf_pmu_disable(pmu); | ||
864 | cpuhw->group_flag |= PERF_EVENT_TXN; | 899 | cpuhw->group_flag |= PERF_EVENT_TXN; |
865 | cpuhw->n_txn_start = cpuhw->n_events; | 900 | cpuhw->n_txn_start = cpuhw->n_events; |
866 | } | 901 | } |
@@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu) | |||
870 | * Clear the flag and pmu::enable() will perform the | 905 | * Clear the flag and pmu::enable() will perform the |
871 | * schedulability test. | 906 | * schedulability test. |
872 | */ | 907 | */ |
873 | void power_pmu_cancel_txn(const struct pmu *pmu) | 908 | void power_pmu_cancel_txn(struct pmu *pmu) |
874 | { | 909 | { |
875 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 910 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
876 | 911 | ||
877 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 912 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
913 | perf_pmu_enable(pmu); | ||
878 | } | 914 | } |
879 | 915 | ||
880 | /* | 916 | /* |
@@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu) | |||
882 | * Perform the group schedulability test as a whole | 918 | * Perform the group schedulability test as a whole |
883 | * Return 0 if success | 919 | * Return 0 if success |
884 | */ | 920 | */ |
885 | int power_pmu_commit_txn(const struct pmu *pmu) | 921 | int power_pmu_commit_txn(struct pmu *pmu) |
886 | { | 922 | { |
887 | struct cpu_hw_events *cpuhw; | 923 | struct cpu_hw_events *cpuhw; |
888 | long i, n; | 924 | long i, n; |
@@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu) | |||
901 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 937 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
902 | 938 | ||
903 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 939 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
940 | perf_pmu_enable(pmu); | ||
904 | return 0; | 941 | return 0; |
905 | } | 942 | } |
906 | 943 | ||
907 | struct pmu power_pmu = { | ||
908 | .enable = power_pmu_enable, | ||
909 | .disable = power_pmu_disable, | ||
910 | .read = power_pmu_read, | ||
911 | .unthrottle = power_pmu_unthrottle, | ||
912 | .start_txn = power_pmu_start_txn, | ||
913 | .cancel_txn = power_pmu_cancel_txn, | ||
914 | .commit_txn = power_pmu_commit_txn, | ||
915 | }; | ||
916 | |||
917 | /* | 944 | /* |
918 | * Return 1 if we might be able to put event on a limited PMC, | 945 | * Return 1 if we might be able to put event on a limited PMC, |
919 | * or 0 if not. | 946 | * or 0 if not. |
@@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
1014 | return 0; | 1041 | return 0; |
1015 | } | 1042 | } |
1016 | 1043 | ||
1017 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1044 | static int power_pmu_event_init(struct perf_event *event) |
1018 | { | 1045 | { |
1019 | u64 ev; | 1046 | u64 ev; |
1020 | unsigned long flags; | 1047 | unsigned long flags; |
@@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1026 | struct cpu_hw_events *cpuhw; | 1053 | struct cpu_hw_events *cpuhw; |
1027 | 1054 | ||
1028 | if (!ppmu) | 1055 | if (!ppmu) |
1029 | return ERR_PTR(-ENXIO); | 1056 | return -ENOENT; |
1057 | |||
1030 | switch (event->attr.type) { | 1058 | switch (event->attr.type) { |
1031 | case PERF_TYPE_HARDWARE: | 1059 | case PERF_TYPE_HARDWARE: |
1032 | ev = event->attr.config; | 1060 | ev = event->attr.config; |
1033 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 1061 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
1034 | return ERR_PTR(-EOPNOTSUPP); | 1062 | return -EOPNOTSUPP; |
1035 | ev = ppmu->generic_events[ev]; | 1063 | ev = ppmu->generic_events[ev]; |
1036 | break; | 1064 | break; |
1037 | case PERF_TYPE_HW_CACHE: | 1065 | case PERF_TYPE_HW_CACHE: |
1038 | err = hw_perf_cache_event(event->attr.config, &ev); | 1066 | err = hw_perf_cache_event(event->attr.config, &ev); |
1039 | if (err) | 1067 | if (err) |
1040 | return ERR_PTR(err); | 1068 | return err; |
1041 | break; | 1069 | break; |
1042 | case PERF_TYPE_RAW: | 1070 | case PERF_TYPE_RAW: |
1043 | ev = event->attr.config; | 1071 | ev = event->attr.config; |
1044 | break; | 1072 | break; |
1045 | default: | 1073 | default: |
1046 | return ERR_PTR(-EINVAL); | 1074 | return -ENOENT; |
1047 | } | 1075 | } |
1076 | |||
1048 | event->hw.config_base = ev; | 1077 | event->hw.config_base = ev; |
1049 | event->hw.idx = 0; | 1078 | event->hw.idx = 0; |
1050 | 1079 | ||
@@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1081 | */ | 1110 | */ |
1082 | ev = normal_pmc_alternative(ev, flags); | 1111 | ev = normal_pmc_alternative(ev, flags); |
1083 | if (!ev) | 1112 | if (!ev) |
1084 | return ERR_PTR(-EINVAL); | 1113 | return -EINVAL; |
1085 | } | 1114 | } |
1086 | } | 1115 | } |
1087 | 1116 | ||
@@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1095 | n = collect_events(event->group_leader, ppmu->n_counter - 1, | 1124 | n = collect_events(event->group_leader, ppmu->n_counter - 1, |
1096 | ctrs, events, cflags); | 1125 | ctrs, events, cflags); |
1097 | if (n < 0) | 1126 | if (n < 0) |
1098 | return ERR_PTR(-EINVAL); | 1127 | return -EINVAL; |
1099 | } | 1128 | } |
1100 | events[n] = ev; | 1129 | events[n] = ev; |
1101 | ctrs[n] = event; | 1130 | ctrs[n] = event; |
1102 | cflags[n] = flags; | 1131 | cflags[n] = flags; |
1103 | if (check_excludes(ctrs, cflags, n, 1)) | 1132 | if (check_excludes(ctrs, cflags, n, 1)) |
1104 | return ERR_PTR(-EINVAL); | 1133 | return -EINVAL; |
1105 | 1134 | ||
1106 | cpuhw = &get_cpu_var(cpu_hw_events); | 1135 | cpuhw = &get_cpu_var(cpu_hw_events); |
1107 | err = power_check_constraints(cpuhw, events, cflags, n + 1); | 1136 | err = power_check_constraints(cpuhw, events, cflags, n + 1); |
1108 | put_cpu_var(cpu_hw_events); | 1137 | put_cpu_var(cpu_hw_events); |
1109 | if (err) | 1138 | if (err) |
1110 | return ERR_PTR(-EINVAL); | 1139 | return -EINVAL; |
1111 | 1140 | ||
1112 | event->hw.config = events[n]; | 1141 | event->hw.config = events[n]; |
1113 | event->hw.event_base = cflags[n]; | 1142 | event->hw.event_base = cflags[n]; |
@@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1132 | } | 1161 | } |
1133 | event->destroy = hw_perf_event_destroy; | 1162 | event->destroy = hw_perf_event_destroy; |
1134 | 1163 | ||
1135 | if (err) | 1164 | return err; |
1136 | return ERR_PTR(err); | ||
1137 | return &power_pmu; | ||
1138 | } | 1165 | } |
1139 | 1166 | ||
1167 | struct pmu power_pmu = { | ||
1168 | .pmu_enable = power_pmu_enable, | ||
1169 | .pmu_disable = power_pmu_disable, | ||
1170 | .event_init = power_pmu_event_init, | ||
1171 | .add = power_pmu_add, | ||
1172 | .del = power_pmu_del, | ||
1173 | .start = power_pmu_start, | ||
1174 | .stop = power_pmu_stop, | ||
1175 | .read = power_pmu_read, | ||
1176 | .start_txn = power_pmu_start_txn, | ||
1177 | .cancel_txn = power_pmu_cancel_txn, | ||
1178 | .commit_txn = power_pmu_commit_txn, | ||
1179 | }; | ||
1180 | |||
1140 | /* | 1181 | /* |
1141 | * A counter has overflowed; update its count and record | 1182 | * A counter has overflowed; update its count and record |
1142 | * things if requested. Note that interrupts are hard-disabled | 1183 | * things if requested. Note that interrupts are hard-disabled |
@@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1149 | s64 prev, delta, left; | 1190 | s64 prev, delta, left; |
1150 | int record = 0; | 1191 | int record = 0; |
1151 | 1192 | ||
1193 | if (event->hw.state & PERF_HES_STOPPED) { | ||
1194 | write_pmc(event->hw.idx, 0); | ||
1195 | return; | ||
1196 | } | ||
1197 | |||
1152 | /* we don't have to worry about interrupts here */ | 1198 | /* we don't have to worry about interrupts here */ |
1153 | prev = local64_read(&event->hw.prev_count); | 1199 | prev = local64_read(&event->hw.prev_count); |
1154 | delta = (val - prev) & 0xfffffffful; | 1200 | delta = (val - prev) & 0xfffffffful; |
@@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1171 | val = 0x80000000LL - left; | 1217 | val = 0x80000000LL - left; |
1172 | } | 1218 | } |
1173 | 1219 | ||
1220 | write_pmc(event->hw.idx, val); | ||
1221 | local64_set(&event->hw.prev_count, val); | ||
1222 | local64_set(&event->hw.period_left, left); | ||
1223 | perf_event_update_userpage(event); | ||
1224 | |||
1174 | /* | 1225 | /* |
1175 | * Finally record data if requested. | 1226 | * Finally record data if requested. |
1176 | */ | 1227 | */ |
@@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1183 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1234 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
1184 | perf_get_data_addr(regs, &data.addr); | 1235 | perf_get_data_addr(regs, &data.addr); |
1185 | 1236 | ||
1186 | if (perf_event_overflow(event, nmi, &data, regs)) { | 1237 | if (perf_event_overflow(event, nmi, &data, regs)) |
1187 | /* | 1238 | power_pmu_stop(event, 0); |
1188 | * Interrupts are coming too fast - throttle them | ||
1189 | * by setting the event to 0, so it will be | ||
1190 | * at least 2^30 cycles until the next interrupt | ||
1191 | * (assuming each event counts at most 2 counts | ||
1192 | * per cycle). | ||
1193 | */ | ||
1194 | val = 0; | ||
1195 | left = ~0ULL >> 1; | ||
1196 | } | ||
1197 | } | 1239 | } |
1198 | |||
1199 | write_pmc(event->hw.idx, val); | ||
1200 | local64_set(&event->hw.prev_count, val); | ||
1201 | local64_set(&event->hw.period_left, left); | ||
1202 | perf_event_update_userpage(event); | ||
1203 | } | 1240 | } |
1204 | 1241 | ||
1205 | /* | 1242 | /* |
@@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu) | |||
1342 | freeze_events_kernel = MMCR0_FCHV; | 1379 | freeze_events_kernel = MMCR0_FCHV; |
1343 | #endif /* CONFIG_PPC64 */ | 1380 | #endif /* CONFIG_PPC64 */ |
1344 | 1381 | ||
1382 | perf_pmu_register(&power_pmu); | ||
1345 | perf_cpu_notifier(power_pmu_notifier); | 1383 | perf_cpu_notifier(power_pmu_notifier); |
1346 | 1384 | ||
1347 | return 0; | 1385 | return 0; |
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 1ba45471ae43..7ecca59ddf77 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c | |||
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
156 | { | 156 | { |
157 | s64 val, delta, prev; | 157 | s64 val, delta, prev; |
158 | 158 | ||
159 | if (event->hw.state & PERF_HES_STOPPED) | ||
160 | return; | ||
161 | |||
159 | /* | 162 | /* |
160 | * Performance monitor interrupts come even when interrupts | 163 | * Performance monitor interrupts come even when interrupts |
161 | * are soft-disabled, as long as interrupts are hard-enabled. | 164 | * are soft-disabled, as long as interrupts are hard-enabled. |
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
177 | * Disable all events to prevent PMU interrupts and to allow | 180 | * Disable all events to prevent PMU interrupts and to allow |
178 | * events to be added or removed. | 181 | * events to be added or removed. |
179 | */ | 182 | */ |
180 | void hw_perf_disable(void) | 183 | static void fsl_emb_pmu_disable(struct pmu *pmu) |
181 | { | 184 | { |
182 | struct cpu_hw_events *cpuhw; | 185 | struct cpu_hw_events *cpuhw; |
183 | unsigned long flags; | 186 | unsigned long flags; |
@@ -216,7 +219,7 @@ void hw_perf_disable(void) | |||
216 | * If we were previously disabled and events were added, then | 219 | * If we were previously disabled and events were added, then |
217 | * put the new config on the PMU. | 220 | * put the new config on the PMU. |
218 | */ | 221 | */ |
219 | void hw_perf_enable(void) | 222 | static void fsl_emb_pmu_enable(struct pmu *pmu) |
220 | { | 223 | { |
221 | struct cpu_hw_events *cpuhw; | 224 | struct cpu_hw_events *cpuhw; |
222 | unsigned long flags; | 225 | unsigned long flags; |
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count, | |||
262 | return n; | 265 | return n; |
263 | } | 266 | } |
264 | 267 | ||
265 | /* perf must be disabled, context locked on entry */ | 268 | /* context locked on entry */ |
266 | static int fsl_emb_pmu_enable(struct perf_event *event) | 269 | static int fsl_emb_pmu_add(struct perf_event *event, int flags) |
267 | { | 270 | { |
268 | struct cpu_hw_events *cpuhw; | 271 | struct cpu_hw_events *cpuhw; |
269 | int ret = -EAGAIN; | 272 | int ret = -EAGAIN; |
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
271 | u64 val; | 274 | u64 val; |
272 | int i; | 275 | int i; |
273 | 276 | ||
277 | perf_pmu_disable(event->pmu); | ||
274 | cpuhw = &get_cpu_var(cpu_hw_events); | 278 | cpuhw = &get_cpu_var(cpu_hw_events); |
275 | 279 | ||
276 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) | 280 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) |
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
301 | val = 0x80000000L - left; | 305 | val = 0x80000000L - left; |
302 | } | 306 | } |
303 | local64_set(&event->hw.prev_count, val); | 307 | local64_set(&event->hw.prev_count, val); |
308 | |||
309 | if (!(flags & PERF_EF_START)) { | ||
310 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
311 | val = 0; | ||
312 | } | ||
313 | |||
304 | write_pmc(i, val); | 314 | write_pmc(i, val); |
305 | perf_event_update_userpage(event); | 315 | perf_event_update_userpage(event); |
306 | 316 | ||
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
310 | ret = 0; | 320 | ret = 0; |
311 | out: | 321 | out: |
312 | put_cpu_var(cpu_hw_events); | 322 | put_cpu_var(cpu_hw_events); |
323 | perf_pmu_enable(event->pmu); | ||
313 | return ret; | 324 | return ret; |
314 | } | 325 | } |
315 | 326 | ||
316 | /* perf must be disabled, context locked on entry */ | 327 | /* context locked on entry */ |
317 | static void fsl_emb_pmu_disable(struct perf_event *event) | 328 | static void fsl_emb_pmu_del(struct perf_event *event, int flags) |
318 | { | 329 | { |
319 | struct cpu_hw_events *cpuhw; | 330 | struct cpu_hw_events *cpuhw; |
320 | int i = event->hw.idx; | 331 | int i = event->hw.idx; |
321 | 332 | ||
333 | perf_pmu_disable(event->pmu); | ||
322 | if (i < 0) | 334 | if (i < 0) |
323 | goto out; | 335 | goto out; |
324 | 336 | ||
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event) | |||
346 | cpuhw->n_events--; | 358 | cpuhw->n_events--; |
347 | 359 | ||
348 | out: | 360 | out: |
361 | perf_pmu_enable(event->pmu); | ||
349 | put_cpu_var(cpu_hw_events); | 362 | put_cpu_var(cpu_hw_events); |
350 | } | 363 | } |
351 | 364 | ||
352 | /* | 365 | static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) |
353 | * Re-enable interrupts on a event after they were throttled | ||
354 | * because they were coming too fast. | ||
355 | * | ||
356 | * Context is locked on entry, but perf is not disabled. | ||
357 | */ | ||
358 | static void fsl_emb_pmu_unthrottle(struct perf_event *event) | ||
359 | { | 366 | { |
360 | s64 val, left; | ||
361 | unsigned long flags; | 367 | unsigned long flags; |
368 | s64 left; | ||
362 | 369 | ||
363 | if (event->hw.idx < 0 || !event->hw.sample_period) | 370 | if (event->hw.idx < 0 || !event->hw.sample_period) |
364 | return; | 371 | return; |
372 | |||
373 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
374 | return; | ||
375 | |||
376 | if (ef_flags & PERF_EF_RELOAD) | ||
377 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
378 | |||
365 | local_irq_save(flags); | 379 | local_irq_save(flags); |
366 | perf_disable(); | 380 | perf_pmu_disable(event->pmu); |
367 | fsl_emb_pmu_read(event); | 381 | |
368 | left = event->hw.sample_period; | 382 | event->hw.state = 0; |
369 | event->hw.last_period = left; | 383 | left = local64_read(&event->hw.period_left); |
370 | val = 0; | 384 | write_pmc(event->hw.idx, left); |
371 | if (left < 0x80000000L) | 385 | |
372 | val = 0x80000000L - left; | ||
373 | write_pmc(event->hw.idx, val); | ||
374 | local64_set(&event->hw.prev_count, val); | ||
375 | local64_set(&event->hw.period_left, left); | ||
376 | perf_event_update_userpage(event); | 386 | perf_event_update_userpage(event); |
377 | perf_enable(); | 387 | perf_pmu_enable(event->pmu); |
378 | local_irq_restore(flags); | 388 | local_irq_restore(flags); |
379 | } | 389 | } |
380 | 390 | ||
381 | static struct pmu fsl_emb_pmu = { | 391 | static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) |
382 | .enable = fsl_emb_pmu_enable, | 392 | { |
383 | .disable = fsl_emb_pmu_disable, | 393 | unsigned long flags; |
384 | .read = fsl_emb_pmu_read, | 394 | |
385 | .unthrottle = fsl_emb_pmu_unthrottle, | 395 | if (event->hw.idx < 0 || !event->hw.sample_period) |
386 | }; | 396 | return; |
397 | |||
398 | if (event->hw.state & PERF_HES_STOPPED) | ||
399 | return; | ||
400 | |||
401 | local_irq_save(flags); | ||
402 | perf_pmu_disable(event->pmu); | ||
403 | |||
404 | fsl_emb_pmu_read(event); | ||
405 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
406 | write_pmc(event->hw.idx, 0); | ||
407 | |||
408 | perf_event_update_userpage(event); | ||
409 | perf_pmu_enable(event->pmu); | ||
410 | local_irq_restore(flags); | ||
411 | } | ||
387 | 412 | ||
388 | /* | 413 | /* |
389 | * Release the PMU if this is the last perf_event. | 414 | * Release the PMU if this is the last perf_event. |
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
428 | return 0; | 453 | return 0; |
429 | } | 454 | } |
430 | 455 | ||
431 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 456 | static int fsl_emb_pmu_event_init(struct perf_event *event) |
432 | { | 457 | { |
433 | u64 ev; | 458 | u64 ev; |
434 | struct perf_event *events[MAX_HWEVENTS]; | 459 | struct perf_event *events[MAX_HWEVENTS]; |
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
441 | case PERF_TYPE_HARDWARE: | 466 | case PERF_TYPE_HARDWARE: |
442 | ev = event->attr.config; | 467 | ev = event->attr.config; |
443 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 468 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
444 | return ERR_PTR(-EOPNOTSUPP); | 469 | return -EOPNOTSUPP; |
445 | ev = ppmu->generic_events[ev]; | 470 | ev = ppmu->generic_events[ev]; |
446 | break; | 471 | break; |
447 | 472 | ||
448 | case PERF_TYPE_HW_CACHE: | 473 | case PERF_TYPE_HW_CACHE: |
449 | err = hw_perf_cache_event(event->attr.config, &ev); | 474 | err = hw_perf_cache_event(event->attr.config, &ev); |
450 | if (err) | 475 | if (err) |
451 | return ERR_PTR(err); | 476 | return err; |
452 | break; | 477 | break; |
453 | 478 | ||
454 | case PERF_TYPE_RAW: | 479 | case PERF_TYPE_RAW: |
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
456 | break; | 481 | break; |
457 | 482 | ||
458 | default: | 483 | default: |
459 | return ERR_PTR(-EINVAL); | 484 | return -ENOENT; |
460 | } | 485 | } |
461 | 486 | ||
462 | event->hw.config = ppmu->xlate_event(ev); | 487 | event->hw.config = ppmu->xlate_event(ev); |
463 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) | 488 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) |
464 | return ERR_PTR(-EINVAL); | 489 | return -EINVAL; |
465 | 490 | ||
466 | /* | 491 | /* |
467 | * If this is in a group, check if it can go on with all the | 492 | * If this is in a group, check if it can go on with all the |
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
473 | n = collect_events(event->group_leader, | 498 | n = collect_events(event->group_leader, |
474 | ppmu->n_counter - 1, events); | 499 | ppmu->n_counter - 1, events); |
475 | if (n < 0) | 500 | if (n < 0) |
476 | return ERR_PTR(-EINVAL); | 501 | return -EINVAL; |
477 | } | 502 | } |
478 | 503 | ||
479 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { | 504 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { |
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
484 | } | 509 | } |
485 | 510 | ||
486 | if (num_restricted >= ppmu->n_restricted) | 511 | if (num_restricted >= ppmu->n_restricted) |
487 | return ERR_PTR(-EINVAL); | 512 | return -EINVAL; |
488 | } | 513 | } |
489 | 514 | ||
490 | event->hw.idx = -1; | 515 | event->hw.idx = -1; |
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
497 | if (event->attr.exclude_kernel) | 522 | if (event->attr.exclude_kernel) |
498 | event->hw.config_base |= PMLCA_FCS; | 523 | event->hw.config_base |= PMLCA_FCS; |
499 | if (event->attr.exclude_idle) | 524 | if (event->attr.exclude_idle) |
500 | return ERR_PTR(-ENOTSUPP); | 525 | return -ENOTSUPP; |
501 | 526 | ||
502 | event->hw.last_period = event->hw.sample_period; | 527 | event->hw.last_period = event->hw.sample_period; |
503 | local64_set(&event->hw.period_left, event->hw.last_period); | 528 | local64_set(&event->hw.period_left, event->hw.last_period); |
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
523 | } | 548 | } |
524 | event->destroy = hw_perf_event_destroy; | 549 | event->destroy = hw_perf_event_destroy; |
525 | 550 | ||
526 | if (err) | 551 | return err; |
527 | return ERR_PTR(err); | ||
528 | return &fsl_emb_pmu; | ||
529 | } | 552 | } |
530 | 553 | ||
554 | static struct pmu fsl_emb_pmu = { | ||
555 | .pmu_enable = fsl_emb_pmu_enable, | ||
556 | .pmu_disable = fsl_emb_pmu_disable, | ||
557 | .event_init = fsl_emb_pmu_event_init, | ||
558 | .add = fsl_emb_pmu_add, | ||
559 | .del = fsl_emb_pmu_del, | ||
560 | .start = fsl_emb_pmu_start, | ||
561 | .stop = fsl_emb_pmu_stop, | ||
562 | .read = fsl_emb_pmu_read, | ||
563 | }; | ||
564 | |||
531 | /* | 565 | /* |
532 | * A counter has overflowed; update its count and record | 566 | * A counter has overflowed; update its count and record |
533 | * things if requested. Note that interrupts are hard-disabled | 567 | * things if requested. Note that interrupts are hard-disabled |
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
540 | s64 prev, delta, left; | 574 | s64 prev, delta, left; |
541 | int record = 0; | 575 | int record = 0; |
542 | 576 | ||
577 | if (event->hw.state & PERF_HES_STOPPED) { | ||
578 | write_pmc(event->hw.idx, 0); | ||
579 | return; | ||
580 | } | ||
581 | |||
543 | /* we don't have to worry about interrupts here */ | 582 | /* we don't have to worry about interrupts here */ |
544 | prev = local64_read(&event->hw.prev_count); | 583 | prev = local64_read(&event->hw.prev_count); |
545 | delta = (val - prev) & 0xfffffffful; | 584 | delta = (val - prev) & 0xfffffffful; |
@@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
562 | val = 0x80000000LL - left; | 601 | val = 0x80000000LL - left; |
563 | } | 602 | } |
564 | 603 | ||
604 | write_pmc(event->hw.idx, val); | ||
605 | local64_set(&event->hw.prev_count, val); | ||
606 | local64_set(&event->hw.period_left, left); | ||
607 | perf_event_update_userpage(event); | ||
608 | |||
565 | /* | 609 | /* |
566 | * Finally record data if requested. | 610 | * Finally record data if requested. |
567 | */ | 611 | */ |
@@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
571 | perf_sample_data_init(&data, 0); | 615 | perf_sample_data_init(&data, 0); |
572 | data.period = event->hw.last_period; | 616 | data.period = event->hw.last_period; |
573 | 617 | ||
574 | if (perf_event_overflow(event, nmi, &data, regs)) { | 618 | if (perf_event_overflow(event, nmi, &data, regs)) |
575 | /* | 619 | fsl_emb_pmu_stop(event, 0); |
576 | * Interrupts are coming too fast - throttle them | ||
577 | * by setting the event to 0, so it will be | ||
578 | * at least 2^30 cycles until the next interrupt | ||
579 | * (assuming each event counts at most 2 counts | ||
580 | * per cycle). | ||
581 | */ | ||
582 | val = 0; | ||
583 | left = ~0ULL >> 1; | ||
584 | } | ||
585 | } | 620 | } |
586 | |||
587 | write_pmc(event->hw.idx, val); | ||
588 | local64_set(&event->hw.prev_count, val); | ||
589 | local64_set(&event->hw.period_left, left); | ||
590 | perf_event_update_userpage(event); | ||
591 | } | 621 | } |
592 | 622 | ||
593 | static void perf_event_interrupt(struct pt_regs *regs) | 623 | static void perf_event_interrupt(struct pt_regs *regs) |
@@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) | |||
651 | pr_info("%s performance monitor hardware support registered\n", | 681 | pr_info("%s performance monitor hardware support registered\n", |
652 | pmu->name); | 682 | pmu->name); |
653 | 683 | ||
684 | perf_pmu_register(&fsl_emb_pmu); | ||
685 | |||
654 | return 0; | 686 | return 0; |
655 | } | 687 | } |
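The fsl_emb hunks above follow the same conversion pattern as the other PMU backends in this merge: the global hw_perf_enable()/hw_perf_disable() hooks and the enable/disable/unthrottle callbacks give way to a struct pmu carrying pmu_enable/pmu_disable, event_init, add/del and start/stop, with counter state tracked through the PERF_HES_* flags. A minimal sketch of that callback shape, assuming the core API exactly as it appears in the hunks (the my_* names and bodies are placeholders, not part of the patch):

	#include <linux/perf_event.h>

	/* Sketch only: mirrors the callback layout introduced above. */
	static void my_pmu_enable(struct pmu *pmu)  { /* unfreeze all counters */ }
	static void my_pmu_disable(struct pmu *pmu) { /* freeze all counters */ }

	static int my_pmu_event_init(struct perf_event *event)
	{
		if (event->attr.type != PERF_TYPE_RAW)
			return -ENOENT;		/* let another PMU claim the event */
		return 0;
	}

	static void my_pmu_start(struct perf_event *event, int flags)
	{
		event->hw.state = 0;		/* counting again */
	}

	static void my_pmu_stop(struct perf_event *event, int flags)
	{
		event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}

	static int my_pmu_add(struct perf_event *event, int flags)
	{
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		if (flags & PERF_EF_START)
			my_pmu_start(event, PERF_EF_RELOAD);
		return 0;
	}

	static void my_pmu_del(struct perf_event *event, int flags)
	{
		my_pmu_stop(event, PERF_EF_UPDATE);
	}

	static void my_pmu_read(struct perf_event *event) { }

	static struct pmu my_pmu = {
		.pmu_enable	= my_pmu_enable,
		.pmu_disable	= my_pmu_disable,
		.event_init	= my_pmu_event_init,
		.add		= my_pmu_add,
		.del		= my_pmu_del,
		.start		= my_pmu_start,
		.stop		= my_pmu_stop,
		.read		= my_pmu_read,
	};

As in the hunks above, such a backend would hand the struct to perf_pmu_register() once at init time instead of exporting hw_perf_event_init().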
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c index 5b243bd3eb3b..3dc2a8d262b8 100644 --- a/arch/powerpc/platforms/512x/clock.c +++ b/arch/powerpc/platforms/512x/clock.c | |||
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id) | |||
57 | int id_match = 0; | 57 | int id_match = 0; |
58 | 58 | ||
59 | if (dev == NULL || id == NULL) | 59 | if (dev == NULL || id == NULL) |
60 | return NULL; | 60 | return clk; |
61 | 61 | ||
62 | mutex_lock(&clocks_mutex); | 62 | mutex_lock(&clocks_mutex); |
63 | list_for_each_entry(p, &clocks, node) { | 63 | list_for_each_entry(p, &clocks, node) { |
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c index 45c0cb9b67e6..18c104820198 100644 --- a/arch/powerpc/platforms/52xx/efika.c +++ b/arch/powerpc/platforms/52xx/efika.c | |||
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void) | |||
99 | if (bus_range == NULL || len < 2 * sizeof(int)) { | 99 | if (bus_range == NULL || len < 2 * sizeof(int)) { |
100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 100 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
101 | ": Can't get bus-range for %s\n", pcictrl->full_name); | 101 | ": Can't get bus-range for %s\n", pcictrl->full_name); |
102 | return; | 102 | goto out_put; |
103 | } | 103 | } |
104 | 104 | ||
105 | if (bus_range[1] == bus_range[0]) | 105 | if (bus_range[1] == bus_range[0]) |
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void) | |||
111 | printk(" controlled by %s\n", pcictrl->full_name); | 111 | printk(" controlled by %s\n", pcictrl->full_name); |
112 | printk("\n"); | 112 | printk("\n"); |
113 | 113 | ||
114 | hose = pcibios_alloc_controller(of_node_get(pcictrl)); | 114 | hose = pcibios_alloc_controller(pcictrl); |
115 | if (!hose) { | 115 | if (!hose) { |
116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME | 116 | printk(KERN_WARNING EFIKA_PLATFORM_NAME |
117 | ": Can't allocate PCI controller structure for %s\n", | 117 | ": Can't allocate PCI controller structure for %s\n", |
118 | pcictrl->full_name); | 118 | pcictrl->full_name); |
119 | return; | 119 | goto out_put; |
120 | } | 120 | } |
121 | 121 | ||
122 | hose->first_busno = bus_range[0]; | 122 | hose->first_busno = bus_range[0]; |
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void) | |||
124 | hose->ops = &rtas_pci_ops; | 124 | hose->ops = &rtas_pci_ops; |
125 | 125 | ||
126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); | 126 | pci_process_bridge_OF_ranges(hose, pcictrl, 0); |
127 | return; | ||
128 | out_put: | ||
129 | of_node_put(pcictrl); | ||
127 | } | 130 | } |
128 | 131 | ||
129 | #else | 132 | #else |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index 6e905314ad5d..41f3a7eda1de 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number) | |||
325 | clrbits32(&simple_gpio->simple_dvo, sync | out); | 325 | clrbits32(&simple_gpio->simple_dvo, sync | out); |
326 | clrbits8(&wkup_gpio->wkup_dvo, reset); | 326 | clrbits8(&wkup_gpio->wkup_dvo, reset); |
327 | 327 | ||
328 | /* wait at lease 1 us */ | 328 | /* wait for 1 us */ |
329 | udelay(2); | 329 | udelay(1); |
330 | 330 | ||
331 | /* Deassert reset */ | 331 | /* Deassert reset */ |
332 | setbits8(&wkup_gpio->wkup_dvo, reset); | 332 | setbits8(&wkup_gpio->wkup_dvo, reset); |
333 | 333 | ||
334 | /* wait at least 200ns */ | ||
335 | /* 7 ~= (200ns * timebase) / ns2sec */ | ||
336 | __delay(7); | ||
337 | |||
334 | /* Restore pin-muxing */ | 338 | /* Restore pin-muxing */ |
335 | out_be32(&simple_gpio->port_config, mux); | 339 | out_be32(&simple_gpio->port_config, mux); |
336 | 340 | ||
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 22cfd634c355..f7167ee4604c 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -407,10 +407,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
407 | { | 407 | { |
408 | vfree(me->arch.syminfo); | 408 | vfree(me->arch.syminfo); |
409 | me->arch.syminfo = NULL; | 409 | me->arch.syminfo = NULL; |
410 | return module_bug_finalize(hdr, sechdrs, me); | 410 | return 0; |
411 | } | 411 | } |
412 | 412 | ||
413 | void module_arch_cleanup(struct module *mod) | 413 | void module_arch_cleanup(struct module *mod) |
414 | { | 414 | { |
415 | module_bug_cleanup(mod); | ||
416 | } | 415 | } |
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index 43adddfe4c04..ae0be697a89e 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c | |||
@@ -149,13 +149,11 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
149 | int ret = 0; | 149 | int ret = 0; |
150 | 150 | ||
151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); | 151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); |
152 | ret |= module_bug_finalize(hdr, sechdrs, me); | ||
153 | 152 | ||
154 | return ret; | 153 | return ret; |
155 | } | 154 | } |
156 | 155 | ||
157 | void module_arch_cleanup(struct module *mod) | 156 | void module_arch_cleanup(struct module *mod) |
158 | { | 157 | { |
159 | module_bug_cleanup(mod); | ||
160 | module_dwarf_cleanup(mod); | 158 | module_dwarf_cleanup(mod); |
161 | } | 159 | } |
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c index a9dd3abde28e..d5ca1ef50fa9 100644 --- a/arch/sh/kernel/perf_callchain.c +++ b/arch/sh/kernel/perf_callchain.c | |||
@@ -14,11 +14,6 @@ | |||
14 | #include <asm/unwinder.h> | 14 | #include <asm/unwinder.h> |
15 | #include <asm/ptrace.h> | 15 | #include <asm/ptrace.h> |
16 | 16 | ||
17 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
18 | { | ||
19 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
20 | entry->ip[entry->nr++] = ip; | ||
21 | } | ||
22 | 17 | ||
23 | static void callchain_warning(void *data, char *msg) | 18 | static void callchain_warning(void *data, char *msg) |
24 | { | 19 | { |
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable) | |||
39 | struct perf_callchain_entry *entry = data; | 34 | struct perf_callchain_entry *entry = data; |
40 | 35 | ||
41 | if (reliable) | 36 | if (reliable) |
42 | callchain_store(entry, addr); | 37 | perf_callchain_store(entry, addr); |
43 | } | 38 | } |
44 | 39 | ||
45 | static const struct stacktrace_ops callchain_ops = { | 40 | static const struct stacktrace_ops callchain_ops = { |
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = { | |||
49 | .address = callchain_address, | 44 | .address = callchain_address, |
50 | }; | 45 | }; |
51 | 46 | ||
52 | static void | 47 | void |
53 | perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) | 48 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) |
54 | { | 49 | { |
55 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 50 | perf_callchain_store(entry, regs->pc); |
56 | callchain_store(entry, regs->pc); | ||
57 | 51 | ||
58 | unwind_stack(NULL, regs, NULL, &callchain_ops, entry); | 52 | unwind_stack(NULL, regs, NULL, &callchain_ops, entry); |
59 | } | 53 | } |
60 | |||
61 | static void | ||
62 | perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
63 | { | ||
64 | int is_user; | ||
65 | |||
66 | if (!regs) | ||
67 | return; | ||
68 | |||
69 | is_user = user_mode(regs); | ||
70 | |||
71 | if (is_user && current->state != TASK_RUNNING) | ||
72 | return; | ||
73 | |||
74 | /* | ||
75 | * Only the kernel side is implemented for now. | ||
76 | */ | ||
77 | if (!is_user) | ||
78 | perf_callchain_kernel(regs, entry); | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * No need for separate IRQ and NMI entries. | ||
83 | */ | ||
84 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
85 | |||
86 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
87 | { | ||
88 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | ||
89 | |||
90 | entry->nr = 0; | ||
91 | |||
92 | perf_do_callchain(regs, entry); | ||
93 | |||
94 | return entry; | ||
95 | } | ||
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 55fe89bbdfe0..5a4b33435650 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -224,50 +224,80 @@ again: | |||
224 | local64_add(delta, &event->count); | 224 | local64_add(delta, &event->count); |
225 | } | 225 | } |
226 | 226 | ||
227 | static void sh_pmu_disable(struct perf_event *event) | 227 | static void sh_pmu_stop(struct perf_event *event, int flags) |
228 | { | 228 | { |
229 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 229 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
230 | struct hw_perf_event *hwc = &event->hw; | 230 | struct hw_perf_event *hwc = &event->hw; |
231 | int idx = hwc->idx; | 231 | int idx = hwc->idx; |
232 | 232 | ||
233 | clear_bit(idx, cpuc->active_mask); | 233 | if (!(event->hw.state & PERF_HES_STOPPED)) { |
234 | sh_pmu->disable(hwc, idx); | 234 | sh_pmu->disable(hwc, idx); |
235 | cpuc->events[idx] = NULL; | ||
236 | event->hw.state |= PERF_HES_STOPPED; | ||
237 | } | ||
238 | |||
239 | if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { | ||
240 | sh_perf_event_update(event, &event->hw, idx); | ||
241 | event->hw.state |= PERF_HES_UPTODATE; | ||
242 | } | ||
243 | } | ||
235 | 244 | ||
236 | barrier(); | 245 | static void sh_pmu_start(struct perf_event *event, int flags) |
246 | { | ||
247 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
248 | struct hw_perf_event *hwc = &event->hw; | ||
249 | int idx = hwc->idx; | ||
237 | 250 | ||
238 | sh_perf_event_update(event, &event->hw, idx); | 251 | if (WARN_ON_ONCE(idx == -1)) |
252 | return; | ||
253 | |||
254 | if (flags & PERF_EF_RELOAD) | ||
255 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
256 | |||
257 | cpuc->events[idx] = event; | ||
258 | event->hw.state = 0; | ||
259 | sh_pmu->enable(hwc, idx); | ||
260 | } | ||
261 | |||
262 | static void sh_pmu_del(struct perf_event *event, int flags) | ||
263 | { | ||
264 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
239 | 265 | ||
240 | cpuc->events[idx] = NULL; | 266 | sh_pmu_stop(event, PERF_EF_UPDATE); |
241 | clear_bit(idx, cpuc->used_mask); | 267 | __clear_bit(event->hw.idx, cpuc->used_mask); |
242 | 268 | ||
243 | perf_event_update_userpage(event); | 269 | perf_event_update_userpage(event); |
244 | } | 270 | } |
245 | 271 | ||
246 | static int sh_pmu_enable(struct perf_event *event) | 272 | static int sh_pmu_add(struct perf_event *event, int flags) |
247 | { | 273 | { |
248 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 274 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
249 | struct hw_perf_event *hwc = &event->hw; | 275 | struct hw_perf_event *hwc = &event->hw; |
250 | int idx = hwc->idx; | 276 | int idx = hwc->idx; |
277 | int ret = -EAGAIN; | ||
278 | |||
279 | perf_pmu_disable(event->pmu); | ||
251 | 280 | ||
252 | if (test_and_set_bit(idx, cpuc->used_mask)) { | 281 | if (__test_and_set_bit(idx, cpuc->used_mask)) { |
253 | idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); | 282 | idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); |
254 | if (idx == sh_pmu->num_events) | 283 | if (idx == sh_pmu->num_events) |
255 | return -EAGAIN; | 284 | goto out; |
256 | 285 | ||
257 | set_bit(idx, cpuc->used_mask); | 286 | __set_bit(idx, cpuc->used_mask); |
258 | hwc->idx = idx; | 287 | hwc->idx = idx; |
259 | } | 288 | } |
260 | 289 | ||
261 | sh_pmu->disable(hwc, idx); | 290 | sh_pmu->disable(hwc, idx); |
262 | 291 | ||
263 | cpuc->events[idx] = event; | 292 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; |
264 | set_bit(idx, cpuc->active_mask); | 293 | if (flags & PERF_EF_START) |
265 | 294 | sh_pmu_start(event, PERF_EF_RELOAD); | |
266 | sh_pmu->enable(hwc, idx); | ||
267 | 295 | ||
268 | perf_event_update_userpage(event); | 296 | perf_event_update_userpage(event); |
269 | 297 | ret = 0; | |
270 | return 0; | 298 | out: |
299 | perf_pmu_enable(event->pmu); | ||
300 | return ret; | ||
271 | } | 301 | } |
272 | 302 | ||
273 | static void sh_pmu_read(struct perf_event *event) | 303 | static void sh_pmu_read(struct perf_event *event) |
@@ -275,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event) | |||
275 | sh_perf_event_update(event, &event->hw, event->hw.idx); | 305 | sh_perf_event_update(event, &event->hw, event->hw.idx); |
276 | } | 306 | } |
277 | 307 | ||
278 | static const struct pmu pmu = { | 308 | static int sh_pmu_event_init(struct perf_event *event) |
279 | .enable = sh_pmu_enable, | ||
280 | .disable = sh_pmu_disable, | ||
281 | .read = sh_pmu_read, | ||
282 | }; | ||
283 | |||
284 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
285 | { | 309 | { |
286 | int err = __hw_perf_event_init(event); | 310 | int err; |
311 | |||
312 | switch (event->attr.type) { | ||
313 | case PERF_TYPE_RAW: | ||
314 | case PERF_TYPE_HW_CACHE: | ||
315 | case PERF_TYPE_HARDWARE: | ||
316 | err = __hw_perf_event_init(event); | ||
317 | break; | ||
318 | |||
319 | default: | ||
320 | return -ENOENT; | ||
321 | } | ||
322 | |||
287 | if (unlikely(err)) { | 323 | if (unlikely(err)) { |
288 | if (event->destroy) | 324 | if (event->destroy) |
289 | event->destroy(event); | 325 | event->destroy(event); |
290 | return ERR_PTR(err); | ||
291 | } | 326 | } |
292 | 327 | ||
293 | return &pmu; | 328 | return err; |
329 | } | ||
330 | |||
331 | static void sh_pmu_enable(struct pmu *pmu) | ||
332 | { | ||
333 | if (!sh_pmu_initialized()) | ||
334 | return; | ||
335 | |||
336 | sh_pmu->enable_all(); | ||
337 | } | ||
338 | |||
339 | static void sh_pmu_disable(struct pmu *pmu) | ||
340 | { | ||
341 | if (!sh_pmu_initialized()) | ||
342 | return; | ||
343 | |||
344 | sh_pmu->disable_all(); | ||
294 | } | 345 | } |
295 | 346 | ||
347 | static struct pmu pmu = { | ||
348 | .pmu_enable = sh_pmu_enable, | ||
349 | .pmu_disable = sh_pmu_disable, | ||
350 | .event_init = sh_pmu_event_init, | ||
351 | .add = sh_pmu_add, | ||
352 | .del = sh_pmu_del, | ||
353 | .start = sh_pmu_start, | ||
354 | .stop = sh_pmu_stop, | ||
355 | .read = sh_pmu_read, | ||
356 | }; | ||
357 | |||
296 | static void sh_pmu_setup(int cpu) | 358 | static void sh_pmu_setup(int cpu) |
297 | { | 359 | { |
298 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); | 360 | struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); |
@@ -317,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
317 | return NOTIFY_OK; | 379 | return NOTIFY_OK; |
318 | } | 380 | } |
319 | 381 | ||
320 | void hw_perf_enable(void) | 382 | int __cpuinit register_sh_pmu(struct sh_pmu *_pmu) |
321 | { | ||
322 | if (!sh_pmu_initialized()) | ||
323 | return; | ||
324 | |||
325 | sh_pmu->enable_all(); | ||
326 | } | ||
327 | |||
328 | void hw_perf_disable(void) | ||
329 | { | ||
330 | if (!sh_pmu_initialized()) | ||
331 | return; | ||
332 | |||
333 | sh_pmu->disable_all(); | ||
334 | } | ||
335 | |||
336 | int __cpuinit register_sh_pmu(struct sh_pmu *pmu) | ||
337 | { | 383 | { |
338 | if (sh_pmu) | 384 | if (sh_pmu) |
339 | return -EBUSY; | 385 | return -EBUSY; |
340 | sh_pmu = pmu; | 386 | sh_pmu = _pmu; |
341 | 387 | ||
342 | pr_info("Performance Events: %s support registered\n", pmu->name); | 388 | pr_info("Performance Events: %s support registered\n", _pmu->name); |
343 | 389 | ||
344 | WARN_ON(pmu->num_events > MAX_HWEVENTS); | 390 | WARN_ON(_pmu->num_events > MAX_HWEVENTS); |
345 | 391 | ||
392 | perf_pmu_register(&pmu); | ||
346 | perf_cpu_notifier(sh_pmu_notifier); | 393 | perf_cpu_notifier(sh_pmu_notifier); |
347 | return 0; | 394 | return 0; |
348 | } | 395 | } |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 491e9d6de191..9212cd42a832 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -30,6 +30,7 @@ config SPARC | |||
30 | select PERF_USE_VMALLOC | 30 | select PERF_USE_VMALLOC |
31 | select HAVE_DMA_ATTRS | 31 | select HAVE_DMA_ATTRS |
32 | select HAVE_DMA_API_DEBUG | 32 | select HAVE_DMA_API_DEBUG |
33 | select HAVE_ARCH_JUMP_LABEL | ||
33 | 34 | ||
34 | config SPARC32 | 35 | config SPARC32 |
35 | def_bool !64BIT | 36 | def_bool !64BIT |
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h new file mode 100644 index 000000000000..62e66d7b2fb6 --- /dev/null +++ b/arch/sparc/include/asm/jump_label.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _ASM_SPARC_JUMP_LABEL_H | ||
2 | #define _ASM_SPARC_JUMP_LABEL_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/types.h> | ||
7 | #include <asm/system.h> | ||
8 | |||
9 | #define JUMP_LABEL_NOP_SIZE 4 | ||
10 | |||
11 | #define JUMP_LABEL(key, label) \ | ||
12 | do { \ | ||
13 | asm goto("1:\n\t" \ | ||
14 | "nop\n\t" \ | ||
15 | "nop\n\t" \ | ||
16 | ".pushsection __jump_table, \"a\"\n\t"\ | ||
17 | ".word 1b, %l[" #label "], %c0\n\t" \ | ||
18 | ".popsection \n\t" \ | ||
19 | : : "i" (key) : : label);\ | ||
20 | } while (0) | ||
21 | |||
22 | #endif /* __KERNEL__ */ | ||
23 | |||
24 | typedef u32 jump_label_t; | ||
25 | |||
26 | struct jump_entry { | ||
27 | jump_label_t code; | ||
28 | jump_label_t target; | ||
29 | jump_label_t key; | ||
30 | }; | ||
31 | |||
32 | #endif | ||
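The new sparc header above defines the asm-goto JUMP_LABEL() macro and the 32-bit jump_entry layout. A hypothetical caller, sketched only to show how the macro is meant to be invoked (the key and function names are illustrative, not from this patch):

	#include <linux/kernel.h>
	#include <asm/jump_label.h>

	static int my_tracing_key;	/* its address identifies this jump site */

	static void my_fast_path(void)
	{
		JUMP_LABEL(&my_tracing_key, do_slow);
		return;			/* default: the two nops fall straight through */
	do_slow:
		/* when the key is enabled, the first nop is patched into a
		 * branch here and the second sits in its delay slot */
		printk(KERN_DEBUG "slow path taken\n");
	}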
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 0c2dc1f24a9a..599398fbbc7c 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -119,3 +119,5 @@ obj-$(CONFIG_COMPAT) += $(audit--y) | |||
119 | 119 | ||
120 | pc--$(CONFIG_PERF_EVENTS) := perf_event.o | 120 | pc--$(CONFIG_PERF_EVENTS) := perf_event.o |
121 | obj-$(CONFIG_SPARC64) += $(pc--y) | 121 | obj-$(CONFIG_SPARC64) += $(pc--y) |
122 | |||
123 | obj-$(CONFIG_SPARC64) += jump_label.o | ||
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c new file mode 100644 index 000000000000..ea2dafc93d78 --- /dev/null +++ b/arch/sparc/kernel/jump_label.c | |||
@@ -0,0 +1,47 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/mutex.h> | ||
4 | #include <linux/cpu.h> | ||
5 | |||
6 | #include <linux/jump_label.h> | ||
7 | #include <linux/memory.h> | ||
8 | |||
9 | #ifdef HAVE_JUMP_LABEL | ||
10 | |||
11 | void arch_jump_label_transform(struct jump_entry *entry, | ||
12 | enum jump_label_type type) | ||
13 | { | ||
14 | u32 val; | ||
15 | u32 *insn = (u32 *) (unsigned long) entry->code; | ||
16 | |||
17 | if (type == JUMP_LABEL_ENABLE) { | ||
18 | s32 off = (s32)entry->target - (s32)entry->code; | ||
19 | |||
20 | #ifdef CONFIG_SPARC64 | ||
21 | /* ba,pt %xcc, . + (off << 2) */ | ||
22 | val = 0x10680000 | ((u32) off >> 2); | ||
23 | #else | ||
24 | /* ba . + (off << 2) */ | ||
25 | val = 0x10800000 | ((u32) off >> 2); | ||
26 | #endif | ||
27 | } else { | ||
28 | val = 0x01000000; | ||
29 | } | ||
30 | |||
31 | get_online_cpus(); | ||
32 | mutex_lock(&text_mutex); | ||
33 | *insn = val; | ||
34 | flushi(insn); | ||
35 | mutex_unlock(&text_mutex); | ||
36 | put_online_cpus(); | ||
37 | } | ||
38 | |||
39 | void arch_jump_label_text_poke_early(jump_label_t addr) | ||
40 | { | ||
41 | u32 *insn_p = (u32 *) (unsigned long) addr; | ||
42 | |||
43 | *insn_p = 0x01000000; | ||
44 | flushi(insn_p); | ||
45 | } | ||
46 | |||
47 | #endif | ||
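For reference, the ENABLE case in arch_jump_label_transform() above encodes a PC-relative unconditional branch whose displacement field counts instruction words, which is why the byte offset is shifted right by two before being OR-ed into the opcode. A small illustration with made-up values:

	/* Illustration only: a target 64 bytes past the jump site gives
	 * off = 64, so the sparc64 "ba,pt %xcc" word becomes
	 * 0x10680000 | (64 >> 2) = 0x10680010. */
	u32 off  = 64;
	u32 insn = 0x10680000 | (off >> 2);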
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index f848aadf54dc..ee3c7dde8d9f 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c | |||
@@ -18,6 +18,9 @@ | |||
18 | #include <asm/spitfire.h> | 18 | #include <asm/spitfire.h> |
19 | 19 | ||
20 | #ifdef CONFIG_SPARC64 | 20 | #ifdef CONFIG_SPARC64 |
21 | |||
22 | #include <linux/jump_label.h> | ||
23 | |||
21 | static void *module_map(unsigned long size) | 24 | static void *module_map(unsigned long size) |
22 | { | 25 | { |
23 | struct vm_struct *area; | 26 | struct vm_struct *area; |
@@ -227,6 +230,9 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
227 | const Elf_Shdr *sechdrs, | 230 | const Elf_Shdr *sechdrs, |
228 | struct module *me) | 231 | struct module *me) |
229 | { | 232 | { |
233 | /* make jump label nops */ | ||
234 | jump_label_apply_nops(me); | ||
235 | |||
230 | /* Cheetah's I-cache is fully coherent. */ | 236 | /* Cheetah's I-cache is fully coherent. */ |
231 | if (tlb_type == spitfire) { | 237 | if (tlb_type == spitfire) { |
232 | unsigned long va; | 238 | unsigned long va; |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 6318e622cfb0..0d6deb55a2ae 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c | |||
@@ -658,13 +658,16 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) | |||
658 | 658 | ||
659 | enc = perf_event_get_enc(cpuc->events[i]); | 659 | enc = perf_event_get_enc(cpuc->events[i]); |
660 | pcr &= ~mask_for_index(idx); | 660 | pcr &= ~mask_for_index(idx); |
661 | pcr |= event_encoding(enc, idx); | 661 | if (hwc->state & PERF_HES_STOPPED) |
662 | pcr |= nop_for_index(idx); | ||
663 | else | ||
664 | pcr |= event_encoding(enc, idx); | ||
662 | } | 665 | } |
663 | out: | 666 | out: |
664 | return pcr; | 667 | return pcr; |
665 | } | 668 | } |
666 | 669 | ||
667 | void hw_perf_enable(void) | 670 | static void sparc_pmu_enable(struct pmu *pmu) |
668 | { | 671 | { |
669 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 672 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
670 | u64 pcr; | 673 | u64 pcr; |
@@ -691,7 +694,7 @@ void hw_perf_enable(void) | |||
691 | pcr_ops->write(cpuc->pcr); | 694 | pcr_ops->write(cpuc->pcr); |
692 | } | 695 | } |
693 | 696 | ||
694 | void hw_perf_disable(void) | 697 | static void sparc_pmu_disable(struct pmu *pmu) |
695 | { | 698 | { |
696 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 699 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
697 | u64 val; | 700 | u64 val; |
@@ -710,19 +713,65 @@ void hw_perf_disable(void) | |||
710 | pcr_ops->write(cpuc->pcr); | 713 | pcr_ops->write(cpuc->pcr); |
711 | } | 714 | } |
712 | 715 | ||
713 | static void sparc_pmu_disable(struct perf_event *event) | 716 | static int active_event_index(struct cpu_hw_events *cpuc, |
717 | struct perf_event *event) | ||
718 | { | ||
719 | int i; | ||
720 | |||
721 | for (i = 0; i < cpuc->n_events; i++) { | ||
722 | if (cpuc->event[i] == event) | ||
723 | break; | ||
724 | } | ||
725 | BUG_ON(i == cpuc->n_events); | ||
726 | return cpuc->current_idx[i]; | ||
727 | } | ||
728 | |||
729 | static void sparc_pmu_start(struct perf_event *event, int flags) | ||
730 | { | ||
731 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
732 | int idx = active_event_index(cpuc, event); | ||
733 | |||
734 | if (flags & PERF_EF_RELOAD) { | ||
735 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
736 | sparc_perf_event_set_period(event, &event->hw, idx); | ||
737 | } | ||
738 | |||
739 | event->hw.state = 0; | ||
740 | |||
741 | sparc_pmu_enable_event(cpuc, &event->hw, idx); | ||
742 | } | ||
743 | |||
744 | static void sparc_pmu_stop(struct perf_event *event, int flags) | ||
745 | { | ||
746 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
747 | int idx = active_event_index(cpuc, event); | ||
748 | |||
749 | if (!(event->hw.state & PERF_HES_STOPPED)) { | ||
750 | sparc_pmu_disable_event(cpuc, &event->hw, idx); | ||
751 | event->hw.state |= PERF_HES_STOPPED; | ||
752 | } | ||
753 | |||
754 | if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) { | ||
755 | sparc_perf_event_update(event, &event->hw, idx); | ||
756 | event->hw.state |= PERF_HES_UPTODATE; | ||
757 | } | ||
758 | } | ||
759 | |||
760 | static void sparc_pmu_del(struct perf_event *event, int _flags) | ||
714 | { | 761 | { |
715 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 762 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
716 | struct hw_perf_event *hwc = &event->hw; | ||
717 | unsigned long flags; | 763 | unsigned long flags; |
718 | int i; | 764 | int i; |
719 | 765 | ||
720 | local_irq_save(flags); | 766 | local_irq_save(flags); |
721 | perf_disable(); | 767 | perf_pmu_disable(event->pmu); |
722 | 768 | ||
723 | for (i = 0; i < cpuc->n_events; i++) { | 769 | for (i = 0; i < cpuc->n_events; i++) { |
724 | if (event == cpuc->event[i]) { | 770 | if (event == cpuc->event[i]) { |
725 | int idx = cpuc->current_idx[i]; | 771 | /* Absorb the final count and turn off the |
772 | * event. | ||
773 | */ | ||
774 | sparc_pmu_stop(event, PERF_EF_UPDATE); | ||
726 | 775 | ||
727 | /* Shift remaining entries down into | 776 | /* Shift remaining entries down into |
728 | * the existing slot. | 777 | * the existing slot. |
@@ -734,13 +783,6 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
734 | cpuc->current_idx[i]; | 783 | cpuc->current_idx[i]; |
735 | } | 784 | } |
736 | 785 | ||
737 | /* Absorb the final count and turn off the | ||
738 | * event. | ||
739 | */ | ||
740 | sparc_pmu_disable_event(cpuc, hwc, idx); | ||
741 | barrier(); | ||
742 | sparc_perf_event_update(event, hwc, idx); | ||
743 | |||
744 | perf_event_update_userpage(event); | 786 | perf_event_update_userpage(event); |
745 | 787 | ||
746 | cpuc->n_events--; | 788 | cpuc->n_events--; |
@@ -748,23 +790,10 @@ static void sparc_pmu_disable(struct perf_event *event) | |||
748 | } | 790 | } |
749 | } | 791 | } |
750 | 792 | ||
751 | perf_enable(); | 793 | perf_pmu_enable(event->pmu); |
752 | local_irq_restore(flags); | 794 | local_irq_restore(flags); |
753 | } | 795 | } |
754 | 796 | ||
755 | static int active_event_index(struct cpu_hw_events *cpuc, | ||
756 | struct perf_event *event) | ||
757 | { | ||
758 | int i; | ||
759 | |||
760 | for (i = 0; i < cpuc->n_events; i++) { | ||
761 | if (cpuc->event[i] == event) | ||
762 | break; | ||
763 | } | ||
764 | BUG_ON(i == cpuc->n_events); | ||
765 | return cpuc->current_idx[i]; | ||
766 | } | ||
767 | |||
768 | static void sparc_pmu_read(struct perf_event *event) | 797 | static void sparc_pmu_read(struct perf_event *event) |
769 | { | 798 | { |
770 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 799 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
@@ -774,15 +803,6 @@ static void sparc_pmu_read(struct perf_event *event) | |||
774 | sparc_perf_event_update(event, hwc, idx); | 803 | sparc_perf_event_update(event, hwc, idx); |
775 | } | 804 | } |
776 | 805 | ||
777 | static void sparc_pmu_unthrottle(struct perf_event *event) | ||
778 | { | ||
779 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
780 | int idx = active_event_index(cpuc, event); | ||
781 | struct hw_perf_event *hwc = &event->hw; | ||
782 | |||
783 | sparc_pmu_enable_event(cpuc, hwc, idx); | ||
784 | } | ||
785 | |||
786 | static atomic_t active_events = ATOMIC_INIT(0); | 806 | static atomic_t active_events = ATOMIC_INIT(0); |
787 | static DEFINE_MUTEX(pmc_grab_mutex); | 807 | static DEFINE_MUTEX(pmc_grab_mutex); |
788 | 808 | ||
@@ -877,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts, | |||
877 | if (!n_ev) | 897 | if (!n_ev) |
878 | return 0; | 898 | return 0; |
879 | 899 | ||
880 | if (n_ev > perf_max_events) | 900 | if (n_ev > MAX_HWEVENTS) |
881 | return -1; | 901 | return -1; |
882 | 902 | ||
883 | msk0 = perf_event_get_msk(events[0]); | 903 | msk0 = perf_event_get_msk(events[0]); |
@@ -984,23 +1004,27 @@ static int collect_events(struct perf_event *group, int max_count, | |||
984 | return n; | 1004 | return n; |
985 | } | 1005 | } |
986 | 1006 | ||
987 | static int sparc_pmu_enable(struct perf_event *event) | 1007 | static int sparc_pmu_add(struct perf_event *event, int ef_flags) |
988 | { | 1008 | { |
989 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1009 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
990 | int n0, ret = -EAGAIN; | 1010 | int n0, ret = -EAGAIN; |
991 | unsigned long flags; | 1011 | unsigned long flags; |
992 | 1012 | ||
993 | local_irq_save(flags); | 1013 | local_irq_save(flags); |
994 | perf_disable(); | 1014 | perf_pmu_disable(event->pmu); |
995 | 1015 | ||
996 | n0 = cpuc->n_events; | 1016 | n0 = cpuc->n_events; |
997 | if (n0 >= perf_max_events) | 1017 | if (n0 >= MAX_HWEVENTS) |
998 | goto out; | 1018 | goto out; |
999 | 1019 | ||
1000 | cpuc->event[n0] = event; | 1020 | cpuc->event[n0] = event; |
1001 | cpuc->events[n0] = event->hw.event_base; | 1021 | cpuc->events[n0] = event->hw.event_base; |
1002 | cpuc->current_idx[n0] = PIC_NO_INDEX; | 1022 | cpuc->current_idx[n0] = PIC_NO_INDEX; |
1003 | 1023 | ||
1024 | event->hw.state = PERF_HES_UPTODATE; | ||
1025 | if (!(ef_flags & PERF_EF_START)) | ||
1026 | event->hw.state |= PERF_HES_STOPPED; | ||
1027 | |||
1004 | /* | 1028 | /* |
1005 | * If group events scheduling transaction was started, | 1029 | * If group events scheduling transaction was started, |
1006 | * skip the schedulability test here, it will be peformed | 1030 | * skip the schedulability test here, it will be peformed |
@@ -1020,12 +1044,12 @@ nocheck: | |||
1020 | 1044 | ||
1021 | ret = 0; | 1045 | ret = 0; |
1022 | out: | 1046 | out: |
1023 | perf_enable(); | 1047 | perf_pmu_enable(event->pmu); |
1024 | local_irq_restore(flags); | 1048 | local_irq_restore(flags); |
1025 | return ret; | 1049 | return ret; |
1026 | } | 1050 | } |
1027 | 1051 | ||
1028 | static int __hw_perf_event_init(struct perf_event *event) | 1052 | static int sparc_pmu_event_init(struct perf_event *event) |
1029 | { | 1053 | { |
1030 | struct perf_event_attr *attr = &event->attr; | 1054 | struct perf_event_attr *attr = &event->attr; |
1031 | struct perf_event *evts[MAX_HWEVENTS]; | 1055 | struct perf_event *evts[MAX_HWEVENTS]; |
@@ -1038,22 +1062,33 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1038 | if (atomic_read(&nmi_active) < 0) | 1062 | if (atomic_read(&nmi_active) < 0) |
1039 | return -ENODEV; | 1063 | return -ENODEV; |
1040 | 1064 | ||
1041 | pmap = NULL; | 1065 | switch (attr->type) { |
1042 | if (attr->type == PERF_TYPE_HARDWARE) { | 1066 | case PERF_TYPE_HARDWARE: |
1043 | if (attr->config >= sparc_pmu->max_events) | 1067 | if (attr->config >= sparc_pmu->max_events) |
1044 | return -EINVAL; | 1068 | return -EINVAL; |
1045 | pmap = sparc_pmu->event_map(attr->config); | 1069 | pmap = sparc_pmu->event_map(attr->config); |
1046 | } else if (attr->type == PERF_TYPE_HW_CACHE) { | 1070 | break; |
1071 | |||
1072 | case PERF_TYPE_HW_CACHE: | ||
1047 | pmap = sparc_map_cache_event(attr->config); | 1073 | pmap = sparc_map_cache_event(attr->config); |
1048 | if (IS_ERR(pmap)) | 1074 | if (IS_ERR(pmap)) |
1049 | return PTR_ERR(pmap); | 1075 | return PTR_ERR(pmap); |
1050 | } else if (attr->type != PERF_TYPE_RAW) | 1076 | break; |
1051 | return -EOPNOTSUPP; | 1077 | |
1078 | case PERF_TYPE_RAW: | ||
1079 | pmap = NULL; | ||
1080 | break; | ||
1081 | |||
1082 | default: | ||
1083 | return -ENOENT; | ||
1084 | |||
1085 | } | ||
1052 | 1086 | ||
1053 | if (pmap) { | 1087 | if (pmap) { |
1054 | hwc->event_base = perf_event_encode(pmap); | 1088 | hwc->event_base = perf_event_encode(pmap); |
1055 | } else { | 1089 | } else { |
1056 | /* User gives us "(encoding << 16) | pic_mask" for | 1090 | /* |
1091 | * User gives us "(encoding << 16) | pic_mask" for | ||
1057 | * PERF_TYPE_RAW events. | 1092 | * PERF_TYPE_RAW events. |
1058 | */ | 1093 | */ |
1059 | hwc->event_base = attr->config; | 1094 | hwc->event_base = attr->config; |
@@ -1071,7 +1106,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1071 | n = 0; | 1106 | n = 0; |
1072 | if (event->group_leader != event) { | 1107 | if (event->group_leader != event) { |
1073 | n = collect_events(event->group_leader, | 1108 | n = collect_events(event->group_leader, |
1074 | perf_max_events - 1, | 1109 | MAX_HWEVENTS - 1, |
1075 | evts, events, current_idx_dmy); | 1110 | evts, events, current_idx_dmy); |
1076 | if (n < 0) | 1111 | if (n < 0) |
1077 | return -EINVAL; | 1112 | return -EINVAL; |
@@ -1107,10 +1142,11 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
1107 | * Set the flag to make pmu::enable() not perform the | 1142 | * Set the flag to make pmu::enable() not perform the |
1108 | * schedulability test, it will be performed at commit time | 1143 | * schedulability test, it will be performed at commit time |
1109 | */ | 1144 | */ |
1110 | static void sparc_pmu_start_txn(const struct pmu *pmu) | 1145 | static void sparc_pmu_start_txn(struct pmu *pmu) |
1111 | { | 1146 | { |
1112 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1147 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1113 | 1148 | ||
1149 | perf_pmu_disable(pmu); | ||
1114 | cpuhw->group_flag |= PERF_EVENT_TXN; | 1150 | cpuhw->group_flag |= PERF_EVENT_TXN; |
1115 | } | 1151 | } |
1116 | 1152 | ||
@@ -1119,11 +1155,12 @@ static void sparc_pmu_start_txn(const struct pmu *pmu) | |||
1119 | * Clear the flag and pmu::enable() will perform the | 1155 | * Clear the flag and pmu::enable() will perform the |
1120 | * schedulability test. | 1156 | * schedulability test. |
1121 | */ | 1157 | */ |
1122 | static void sparc_pmu_cancel_txn(const struct pmu *pmu) | 1158 | static void sparc_pmu_cancel_txn(struct pmu *pmu) |
1123 | { | 1159 | { |
1124 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1160 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1125 | 1161 | ||
1126 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 1162 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
1163 | perf_pmu_enable(pmu); | ||
1127 | } | 1164 | } |
1128 | 1165 | ||
1129 | /* | 1166 | /* |
@@ -1131,7 +1168,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu) | |||
1131 | * Perform the group schedulability test as a whole | 1168 | * Perform the group schedulability test as a whole |
1132 | * Return 0 if success | 1169 | * Return 0 if success |
1133 | */ | 1170 | */ |
1134 | static int sparc_pmu_commit_txn(const struct pmu *pmu) | 1171 | static int sparc_pmu_commit_txn(struct pmu *pmu) |
1135 | { | 1172 | { |
1136 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1173 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1137 | int n; | 1174 | int n; |
@@ -1147,28 +1184,24 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu) | |||
1147 | return -EAGAIN; | 1184 | return -EAGAIN; |
1148 | 1185 | ||
1149 | cpuc->group_flag &= ~PERF_EVENT_TXN; | 1186 | cpuc->group_flag &= ~PERF_EVENT_TXN; |
1187 | perf_pmu_enable(pmu); | ||
1150 | return 0; | 1188 | return 0; |
1151 | } | 1189 | } |
1152 | 1190 | ||
1153 | static const struct pmu pmu = { | 1191 | static struct pmu pmu = { |
1154 | .enable = sparc_pmu_enable, | 1192 | .pmu_enable = sparc_pmu_enable, |
1155 | .disable = sparc_pmu_disable, | 1193 | .pmu_disable = sparc_pmu_disable, |
1194 | .event_init = sparc_pmu_event_init, | ||
1195 | .add = sparc_pmu_add, | ||
1196 | .del = sparc_pmu_del, | ||
1197 | .start = sparc_pmu_start, | ||
1198 | .stop = sparc_pmu_stop, | ||
1156 | .read = sparc_pmu_read, | 1199 | .read = sparc_pmu_read, |
1157 | .unthrottle = sparc_pmu_unthrottle, | ||
1158 | .start_txn = sparc_pmu_start_txn, | 1200 | .start_txn = sparc_pmu_start_txn, |
1159 | .cancel_txn = sparc_pmu_cancel_txn, | 1201 | .cancel_txn = sparc_pmu_cancel_txn, |
1160 | .commit_txn = sparc_pmu_commit_txn, | 1202 | .commit_txn = sparc_pmu_commit_txn, |
1161 | }; | 1203 | }; |
1162 | 1204 | ||
1163 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
1164 | { | ||
1165 | int err = __hw_perf_event_init(event); | ||
1166 | |||
1167 | if (err) | ||
1168 | return ERR_PTR(err); | ||
1169 | return &pmu; | ||
1170 | } | ||
1171 | |||
1172 | void perf_event_print_debug(void) | 1205 | void perf_event_print_debug(void) |
1173 | { | 1206 | { |
1174 | unsigned long flags; | 1207 | unsigned long flags; |
@@ -1244,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
1244 | continue; | 1277 | continue; |
1245 | 1278 | ||
1246 | if (perf_event_overflow(event, 1, &data, regs)) | 1279 | if (perf_event_overflow(event, 1, &data, regs)) |
1247 | sparc_pmu_disable_event(cpuc, hwc, idx); | 1280 | sparc_pmu_stop(event, 0); |
1248 | } | 1281 | } |
1249 | 1282 | ||
1250 | return NOTIFY_STOP; | 1283 | return NOTIFY_STOP; |
@@ -1285,28 +1318,21 @@ void __init init_hw_perf_events(void) | |||
1285 | 1318 | ||
1286 | pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); | 1319 | pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); |
1287 | 1320 | ||
1288 | /* All sparc64 PMUs currently have 2 events. */ | 1321 | perf_pmu_register(&pmu); |
1289 | perf_max_events = 2; | ||
1290 | |||
1291 | register_die_notifier(&perf_event_nmi_notifier); | 1322 | register_die_notifier(&perf_event_nmi_notifier); |
1292 | } | 1323 | } |
1293 | 1324 | ||
1294 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | 1325 | void perf_callchain_kernel(struct perf_callchain_entry *entry, |
1295 | { | 1326 | struct pt_regs *regs) |
1296 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
1297 | entry->ip[entry->nr++] = ip; | ||
1298 | } | ||
1299 | |||
1300 | static void perf_callchain_kernel(struct pt_regs *regs, | ||
1301 | struct perf_callchain_entry *entry) | ||
1302 | { | 1327 | { |
1303 | unsigned long ksp, fp; | 1328 | unsigned long ksp, fp; |
1304 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1329 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1305 | int graph = 0; | 1330 | int graph = 0; |
1306 | #endif | 1331 | #endif |
1307 | 1332 | ||
1308 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 1333 | stack_trace_flush(); |
1309 | callchain_store(entry, regs->tpc); | 1334 | |
1335 | perf_callchain_store(entry, regs->tpc); | ||
1310 | 1336 | ||
1311 | ksp = regs->u_regs[UREG_I6]; | 1337 | ksp = regs->u_regs[UREG_I6]; |
1312 | fp = ksp + STACK_BIAS; | 1338 | fp = ksp + STACK_BIAS; |
@@ -1330,13 +1356,13 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
1330 | pc = sf->callers_pc; | 1356 | pc = sf->callers_pc; |
1331 | fp = (unsigned long)sf->fp + STACK_BIAS; | 1357 | fp = (unsigned long)sf->fp + STACK_BIAS; |
1332 | } | 1358 | } |
1333 | callchain_store(entry, pc); | 1359 | perf_callchain_store(entry, pc); |
1334 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1360 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1335 | if ((pc + 8UL) == (unsigned long) &return_to_handler) { | 1361 | if ((pc + 8UL) == (unsigned long) &return_to_handler) { |
1336 | int index = current->curr_ret_stack; | 1362 | int index = current->curr_ret_stack; |
1337 | if (current->ret_stack && index >= graph) { | 1363 | if (current->ret_stack && index >= graph) { |
1338 | pc = current->ret_stack[index - graph].ret; | 1364 | pc = current->ret_stack[index - graph].ret; |
1339 | callchain_store(entry, pc); | 1365 | perf_callchain_store(entry, pc); |
1340 | graph++; | 1366 | graph++; |
1341 | } | 1367 | } |
1342 | } | 1368 | } |
@@ -1344,13 +1370,12 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
1344 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | 1370 | } while (entry->nr < PERF_MAX_STACK_DEPTH); |
1345 | } | 1371 | } |
1346 | 1372 | ||
1347 | static void perf_callchain_user_64(struct pt_regs *regs, | 1373 | static void perf_callchain_user_64(struct perf_callchain_entry *entry, |
1348 | struct perf_callchain_entry *entry) | 1374 | struct pt_regs *regs) |
1349 | { | 1375 | { |
1350 | unsigned long ufp; | 1376 | unsigned long ufp; |
1351 | 1377 | ||
1352 | callchain_store(entry, PERF_CONTEXT_USER); | 1378 | perf_callchain_store(entry, regs->tpc); |
1353 | callchain_store(entry, regs->tpc); | ||
1354 | 1379 | ||
1355 | ufp = regs->u_regs[UREG_I6] + STACK_BIAS; | 1380 | ufp = regs->u_regs[UREG_I6] + STACK_BIAS; |
1356 | do { | 1381 | do { |
@@ -1363,17 +1388,16 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
1363 | 1388 | ||
1364 | pc = sf.callers_pc; | 1389 | pc = sf.callers_pc; |
1365 | ufp = (unsigned long)sf.fp + STACK_BIAS; | 1390 | ufp = (unsigned long)sf.fp + STACK_BIAS; |
1366 | callchain_store(entry, pc); | 1391 | perf_callchain_store(entry, pc); |
1367 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | 1392 | } while (entry->nr < PERF_MAX_STACK_DEPTH); |
1368 | } | 1393 | } |
1369 | 1394 | ||
1370 | static void perf_callchain_user_32(struct pt_regs *regs, | 1395 | static void perf_callchain_user_32(struct perf_callchain_entry *entry, |
1371 | struct perf_callchain_entry *entry) | 1396 | struct pt_regs *regs) |
1372 | { | 1397 | { |
1373 | unsigned long ufp; | 1398 | unsigned long ufp; |
1374 | 1399 | ||
1375 | callchain_store(entry, PERF_CONTEXT_USER); | 1400 | perf_callchain_store(entry, regs->tpc); |
1376 | callchain_store(entry, regs->tpc); | ||
1377 | 1401 | ||
1378 | ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; | 1402 | ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; |
1379 | do { | 1403 | do { |
@@ -1386,34 +1410,16 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
1386 | 1410 | ||
1387 | pc = sf.callers_pc; | 1411 | pc = sf.callers_pc; |
1388 | ufp = (unsigned long)sf.fp; | 1412 | ufp = (unsigned long)sf.fp; |
1389 | callchain_store(entry, pc); | 1413 | perf_callchain_store(entry, pc); |
1390 | } while (entry->nr < PERF_MAX_STACK_DEPTH); | 1414 | } while (entry->nr < PERF_MAX_STACK_DEPTH); |
1391 | } | 1415 | } |
1392 | 1416 | ||
1393 | /* Like powerpc we can't get PMU interrupts within the PMU handler, | 1417 | void |
1394 | * so no need for separate NMI and IRQ chains as on x86. | 1418 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
1395 | */ | ||
1396 | static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); | ||
1397 | |||
1398 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
1399 | { | 1419 | { |
1400 | struct perf_callchain_entry *entry = &__get_cpu_var(callchain); | 1420 | flushw_user(); |
1401 | 1421 | if (test_thread_flag(TIF_32BIT)) | |
1402 | entry->nr = 0; | 1422 | perf_callchain_user_32(entry, regs); |
1403 | if (!user_mode(regs)) { | 1423 | else |
1404 | stack_trace_flush(); | 1424 | perf_callchain_user_64(entry, regs); |
1405 | perf_callchain_kernel(regs, entry); | ||
1406 | if (current->mm) | ||
1407 | regs = task_pt_regs(current); | ||
1408 | else | ||
1409 | regs = NULL; | ||
1410 | } | ||
1411 | if (regs) { | ||
1412 | flushw_user(); | ||
1413 | if (test_thread_flag(TIF_32BIT)) | ||
1414 | perf_callchain_user_32(regs, entry); | ||
1415 | else | ||
1416 | perf_callchain_user_64(regs, entry); | ||
1417 | } | ||
1418 | return entry; | ||
1419 | } | 1425 | } |
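Both the sh and sparc callchain hunks above drop the per-arch callchain_store() copies and the perf_callchain() entry points: the architecture code now only records its own frames, while the generic core allocates the entry and inserts the PERF_CONTEXT_KERNEL/USER markers. The core-provided helper the arch code now calls behaves like the removed arch-local copy, roughly:

	/* Sketch of the core helper, matching the callchain_store()
	 * implementations removed above. */
	static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
	{
		if (entry->nr < PERF_MAX_STACK_DEPTH)
			entry->ip[entry->nr++] = ip;
	}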
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 84f296ca9e63..8f58bdff20d7 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -1506,13 +1506,6 @@ handle_ill: | |||
1506 | } | 1506 | } |
1507 | STD_ENDPROC(handle_ill) | 1507 | STD_ENDPROC(handle_ill) |
1508 | 1508 | ||
1509 | .pushsection .rodata, "a" | ||
1510 | .align 8 | ||
1511 | bpt_code: | ||
1512 | bpt | ||
1513 | ENDPROC(bpt_code) | ||
1514 | .popsection | ||
1515 | |||
1516 | /* Various stub interrupt handlers and syscall handlers */ | 1509 | /* Various stub interrupt handlers and syscall handlers */ |
1517 | 1510 | ||
1518 | STD_ENTRY_LOCAL(_kernel_double_fault) | 1511 | STD_ENTRY_LOCAL(_kernel_double_fault) |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 2ab233ba32c1..47d0c37897d5 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
@@ -255,18 +255,6 @@ static void uml_net_tx_timeout(struct net_device *dev) | |||
255 | netif_wake_queue(dev); | 255 | netif_wake_queue(dev); |
256 | } | 256 | } |
257 | 257 | ||
258 | static int uml_net_set_mac(struct net_device *dev, void *addr) | ||
259 | { | ||
260 | struct uml_net_private *lp = netdev_priv(dev); | ||
261 | struct sockaddr *hwaddr = addr; | ||
262 | |||
263 | spin_lock_irq(&lp->lock); | ||
264 | eth_mac_addr(dev, hwaddr->sa_data); | ||
265 | spin_unlock_irq(&lp->lock); | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static int uml_net_change_mtu(struct net_device *dev, int new_mtu) | 258 | static int uml_net_change_mtu(struct net_device *dev, int new_mtu) |
271 | { | 259 | { |
272 | dev->mtu = new_mtu; | 260 | dev->mtu = new_mtu; |
@@ -373,7 +361,7 @@ static const struct net_device_ops uml_netdev_ops = { | |||
373 | .ndo_start_xmit = uml_net_start_xmit, | 361 | .ndo_start_xmit = uml_net_start_xmit, |
374 | .ndo_set_multicast_list = uml_net_set_multicast_list, | 362 | .ndo_set_multicast_list = uml_net_set_multicast_list, |
375 | .ndo_tx_timeout = uml_net_tx_timeout, | 363 | .ndo_tx_timeout = uml_net_tx_timeout, |
376 | .ndo_set_mac_address = uml_net_set_mac, | 364 | .ndo_set_mac_address = eth_mac_addr, |
377 | .ndo_change_mtu = uml_net_change_mtu, | 365 | .ndo_change_mtu = uml_net_change_mtu, |
378 | .ndo_validate_addr = eth_validate_addr, | 366 | .ndo_validate_addr = eth_validate_addr, |
379 | }; | 367 | }; |
@@ -472,7 +460,8 @@ static void eth_configure(int n, void *init, char *mac, | |||
472 | ((*transport->user->init)(&lp->user, dev) != 0)) | 460 | ((*transport->user->init)(&lp->user, dev) != 0)) |
473 | goto out_unregister; | 461 | goto out_unregister; |
474 | 462 | ||
475 | eth_mac_addr(dev, device->mac); | 463 | /* don't use eth_mac_addr, it will not work here */ |
464 | memcpy(dev->dev_addr, device->mac, ETH_ALEN); | ||
476 | dev->mtu = transport->user->mtu; | 465 | dev->mtu = transport->user->mtu; |
477 | dev->netdev_ops = &uml_netdev_ops; | 466 | dev->netdev_ops = &uml_netdev_ops; |
478 | dev->ethtool_ops = &uml_net_ethtool_ops; | 467 | dev->ethtool_ops = &uml_net_ethtool_ops; |
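The driver-private uml_net_set_mac() only wrapped eth_mac_addr() in a lock, so the ndo_set_mac_address hook can point at eth_mac_addr() directly. At eth_configure() time, however, device->mac is a bare six-byte buffer rather than a struct sockaddr, which is presumably why the new comment says eth_mac_addr() "will not work here" and a plain memcpy() into dev->dev_addr is used instead. A rough reconstruction of what eth_mac_addr() does (illustrative, not verbatim) makes the mismatch visible:

int eth_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;	/* a raw MAC passed here would be
					 * read at the wrong offset (sa_data) */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}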
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c index cd145eda3579..49b5e1eb3262 100644 --- a/arch/um/kernel/exec.c +++ b/arch/um/kernel/exec.c | |||
@@ -62,7 +62,7 @@ static long execve1(const char *file, | |||
62 | return error; | 62 | return error; |
63 | } | 63 | } |
64 | 64 | ||
65 | long um_execve(const char *file, char __user *__user *argv, char __user *__user *env) | 65 | long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) |
66 | { | 66 | { |
67 | long err; | 67 | long err; |
68 | 68 | ||
@@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user | |||
72 | return err; | 72 | return err; |
73 | } | 73 | } |
74 | 74 | ||
75 | long sys_execve(const char __user *file, char __user *__user *argv, | 75 | long sys_execve(const char __user *file, const char __user *const __user *argv, |
76 | char __user *__user *env) | 76 | const char __user *const __user *env) |
77 | { | 77 | { |
78 | long error; | 78 | long error; |
79 | char *filename; | 79 | char *filename; |
diff --git a/arch/um/kernel/internal.h b/arch/um/kernel/internal.h index 1303a105fe91..5bf97db24a04 100644 --- a/arch/um/kernel/internal.h +++ b/arch/um/kernel/internal.h | |||
@@ -1 +1 @@ | |||
extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env); | extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env); | ||
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c index 5ddb246626db..f958cb876ee3 100644 --- a/arch/um/kernel/syscall.c +++ b/arch/um/kernel/syscall.c | |||
@@ -60,8 +60,8 @@ int kernel_execve(const char *filename, | |||
60 | 60 | ||
61 | fs = get_fs(); | 61 | fs = get_fs(); |
62 | set_fs(KERNEL_DS); | 62 | set_fs(KERNEL_DS); |
63 | ret = um_execve(filename, (char __user *__user *)argv, | 63 | ret = um_execve(filename, (const char __user *const __user *)argv, |
64 | (char __user *__user *) envp); | 64 | (const char __user *const __user *) envp); |
65 | set_fs(fs); | 65 | set_fs(fs); |
66 | 66 | ||
67 | return ret; | 67 | return ret; |
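The const churn in the three UML files above exists only to match the new execve prototypes in the core kernel. Reading the argument type from the inside out (annotation only, hypothetical variable name):

const char __user *const __user *argv;
/*    ^ the strings are const
 *                  ^ each element pointer is const
 *                                 ^ argv itself is a user-space pointer
 *                                   to that array of user-space pointers */

kernel_execve() builds its vectors in kernel space, hence the explicit cast through the user-qualified type when it calls um_execve(), as the syscall.c hunk shows.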
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cea0cd9a316f..9815221976a7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -33,6 +33,7 @@ config X86 | |||
33 | select HAVE_KRETPROBES | 33 | select HAVE_KRETPROBES |
34 | select HAVE_OPTPROBES | 34 | select HAVE_OPTPROBES |
35 | select HAVE_FTRACE_MCOUNT_RECORD | 35 | select HAVE_FTRACE_MCOUNT_RECORD |
36 | select HAVE_C_RECORDMCOUNT | ||
36 | select HAVE_DYNAMIC_FTRACE | 37 | select HAVE_DYNAMIC_FTRACE |
37 | select HAVE_FUNCTION_TRACER | 38 | select HAVE_FUNCTION_TRACER |
38 | select HAVE_FUNCTION_GRAPH_TRACER | 39 | select HAVE_FUNCTION_GRAPH_TRACER |
@@ -59,6 +60,8 @@ config X86 | |||
59 | select ANON_INODES | 60 | select ANON_INODES |
60 | select HAVE_ARCH_KMEMCHECK | 61 | select HAVE_ARCH_KMEMCHECK |
61 | select HAVE_USER_RETURN_NOTIFIER | 62 | select HAVE_USER_RETURN_NOTIFIER |
63 | select HAVE_ARCH_JUMP_LABEL | ||
64 | select HAVE_TEXT_POKE_SMP | ||
62 | 65 | ||
63 | config INSTRUCTION_DECODER | 66 | config INSTRUCTION_DECODER |
64 | def_bool (KPROBES || PERF_EVENTS) | 67 | def_bool (KPROBES || PERF_EVENTS) |
@@ -2125,6 +2128,10 @@ config HAVE_ATOMIC_IOMAP | |||
2125 | def_bool y | 2128 | def_bool y |
2126 | depends on X86_32 | 2129 | depends on X86_32 |
2127 | 2130 | ||
2131 | config HAVE_TEXT_POKE_SMP | ||
2132 | bool | ||
2133 | select STOP_MACHINE if SMP | ||
2134 | |||
2128 | source "net/Kconfig" | 2135 | source "net/Kconfig" |
2129 | 2136 | ||
2130 | source "drivers/Kconfig" | 2137 | source "drivers/Kconfig" |
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c index 030f4b93e255..5df2869c874b 100644 --- a/arch/x86/boot/early_serial_console.c +++ b/arch/x86/boot/early_serial_console.c | |||
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void) | |||
58 | if (arg[pos] == ',') | 58 | if (arg[pos] == ',') |
59 | pos++; | 59 | pos++; |
60 | 60 | ||
61 | if (!strncmp(arg, "ttyS", 4)) { | 61 | /* |
62 | * make sure we have | ||
63 | * "serial,0x3f8,115200" | ||
64 | * "serial,ttyS0,115200" | ||
65 | * "ttyS0,115200" | ||
66 | */ | ||
67 | if (pos == 7 && !strncmp(arg + pos, "0x", 2)) { | ||
68 | port = simple_strtoull(arg + pos, &e, 16); | ||
69 | if (port == 0 || arg + pos == e) | ||
70 | port = DEFAULT_SERIAL_PORT; | ||
71 | else | ||
72 | pos = e - arg; | ||
73 | } else if (!strncmp(arg + pos, "ttyS", 4)) { | ||
62 | static const int bases[] = { 0x3f8, 0x2f8 }; | 74 | static const int bases[] = { 0x3f8, 0x2f8 }; |
63 | int idx = 0; | 75 | int idx = 0; |
64 | 76 | ||
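The new branch lets earlyprintk= name the UART either by raw I/O port ("serial,0x3f8,115200") or by ttyS number; the pos == 7 test corresponds to the "serial," prefix having been consumed. A simplified, self-contained sketch of the port selection in plain C, with strtoul standing in for the kernel's simple_strtoull and the function name invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8

static unsigned long parse_port(const char *arg)
{
	static const unsigned long bases[] = { 0x3f8, 0x2f8 };

	if (!strncmp(arg, "0x", 2)) {
		unsigned long port = strtoul(arg, NULL, 16);
		return port ? port : DEFAULT_SERIAL_PORT;
	}
	if (!strncmp(arg, "ttyS", 4))
		return bases[arg[4] == '1'];	/* ttyS0 or ttyS1 */
	return DEFAULT_SERIAL_PORT;
}

int main(void)
{
	printf("%#lx\n", parse_port("0x3f8"));	/* 0x3f8 */
	printf("%#lx\n", parse_port("ttyS1"));	/* 0x2f8 */
	return 0;
}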
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index bc6abb7bc7ee..76561d20ea2f 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/stringify.h> | 6 | #include <linux/stringify.h> |
7 | #include <linux/jump_label.h> | ||
7 | #include <asm/asm.h> | 8 | #include <asm/asm.h> |
8 | 9 | ||
9 | /* | 10 | /* |
@@ -160,6 +161,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, | |||
160 | #define __parainstructions_end NULL | 161 | #define __parainstructions_end NULL |
161 | #endif | 162 | #endif |
162 | 163 | ||
164 | extern void *text_poke_early(void *addr, const void *opcode, size_t len); | ||
165 | |||
163 | /* | 166 | /* |
164 | * Clear and restore the kernel write-protection flag on the local CPU. | 167 | * Clear and restore the kernel write-protection flag on the local CPU. |
165 | * Allows the kernel to edit read-only pages. | 168 | * Allows the kernel to edit read-only pages. |
@@ -180,4 +183,12 @@ static inline void apply_paravirt(struct paravirt_patch_site *start, | |||
180 | extern void *text_poke(void *addr, const void *opcode, size_t len); | 183 | extern void *text_poke(void *addr, const void *opcode, size_t len); |
181 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); | 184 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); |
182 | 185 | ||
186 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | ||
187 | #define IDEAL_NOP_SIZE_5 5 | ||
188 | extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; | ||
189 | extern void arch_init_ideal_nop5(void); | ||
190 | #else | ||
191 | static inline void arch_init_ideal_nop5(void) {} | ||
192 | #endif | ||
193 | |||
183 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 194 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index d2544f1d705d..cb030374b90a 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h | |||
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { } | |||
38 | 38 | ||
39 | #endif /* !CONFIG_AMD_IOMMU_STATS */ | 39 | #endif /* !CONFIG_AMD_IOMMU_STATS */ |
40 | 40 | ||
41 | static inline bool is_rd890_iommu(struct pci_dev *pdev) | ||
42 | { | ||
43 | return (pdev->vendor == PCI_VENDOR_ID_ATI) && | ||
44 | (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); | ||
45 | } | ||
46 | |||
41 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ | 47 | #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ |
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 7014e88bc779..08616180deaf 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h | |||
@@ -368,6 +368,9 @@ struct amd_iommu { | |||
368 | /* capabilities of that IOMMU read from ACPI */ | 368 | /* capabilities of that IOMMU read from ACPI */ |
369 | u32 cap; | 369 | u32 cap; |
370 | 370 | ||
371 | /* flags read from acpi table */ | ||
372 | u8 acpi_flags; | ||
373 | |||
371 | /* | 374 | /* |
372 | * Capability pointer. There could be more than one IOMMU per PCI | 375 | * Capability pointer. There could be more than one IOMMU per PCI |
373 | * device function if there are more than one AMD IOMMU capability | 376 | * device function if there are more than one AMD IOMMU capability |
@@ -411,6 +414,15 @@ struct amd_iommu { | |||
411 | 414 | ||
412 | /* default dma_ops domain for that IOMMU */ | 415 | /* default dma_ops domain for that IOMMU */ |
413 | struct dma_ops_domain *default_dom; | 416 | struct dma_ops_domain *default_dom; |
417 | |||
418 | /* | ||
419 | * This array is required to work around a potential BIOS bug. | ||
420 | * The BIOS may miss to restore parts of the PCI configuration | ||
421 | * space when the system resumes from S3. The result is that the | ||
422 | * IOMMU does not execute commands anymore which leads to system | ||
423 | * failure. | ||
424 | */ | ||
425 | u32 cache_cfg[4]; | ||
414 | }; | 426 | }; |
415 | 427 | ||
416 | /* | 428 | /* |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 545776efeb16..bafd80defa43 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) | 309 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) |
310 | { | 310 | { |
311 | return ((1UL << (nr % BITS_PER_LONG)) & | 311 | return ((1UL << (nr % BITS_PER_LONG)) & |
312 | (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; | 312 | (addr[nr / BITS_PER_LONG])) != 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) | 315 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index c6fbb7b430d1..3f76523589af 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -168,6 +168,7 @@ | |||
168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ | 168 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ |
169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ | 169 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ |
170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ | 170 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ |
171 | #define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */ | ||
171 | 172 | ||
172 | /* Virtualization flags: Linux defined, word 8 */ | 173 | /* Virtualization flags: Linux defined, word 8 */ |
173 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | 174 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ |
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h new file mode 100644 index 000000000000..f52d42e80585 --- /dev/null +++ b/arch/x86/include/asm/jump_label.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _ASM_X86_JUMP_LABEL_H | ||
2 | #define _ASM_X86_JUMP_LABEL_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/types.h> | ||
7 | #include <asm/nops.h> | ||
8 | |||
9 | #define JUMP_LABEL_NOP_SIZE 5 | ||
10 | |||
11 | # define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" | ||
12 | |||
13 | # define JUMP_LABEL(key, label) \ | ||
14 | do { \ | ||
15 | asm goto("1:" \ | ||
16 | JUMP_LABEL_INITIAL_NOP \ | ||
17 | ".pushsection __jump_table, \"a\" \n\t"\ | ||
18 | _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ | ||
19 | ".popsection \n\t" \ | ||
20 | : : "i" (key) : : label); \ | ||
21 | } while (0) | ||
22 | |||
23 | #endif /* __KERNEL__ */ | ||
24 | |||
25 | #ifdef CONFIG_X86_64 | ||
26 | typedef u64 jump_label_t; | ||
27 | #else | ||
28 | typedef u32 jump_label_t; | ||
29 | #endif | ||
30 | |||
31 | struct jump_entry { | ||
32 | jump_label_t code; | ||
33 | jump_label_t target; | ||
34 | jump_label_t key; | ||
35 | }; | ||
36 | |||
37 | #endif | ||
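The initial ".byte 0xe9 / .long 0" sequence is a relative jump with zero displacement, i.e. a 5-byte fall-through, and each use of the macro records the (site, target, key) triple in the __jump_table section so the site can later be rewritten to either ideal_nop5 or a real jump. A hedged usage sketch with invented names (the actual users at this point were tracepoints and dynamic_debug, going through include/linux/jump_label.h):

extern void handle_rare_feature(void);	/* hypothetical slow path */
static int my_feature_key;		/* its address acts as the jump-label key */

static void hot_path(void)
{
	JUMP_LABEL(&my_feature_key, do_feature);
	return;				/* common case: falls through the nop */

do_feature:
	handle_rare_feature();		/* reached once the site is patched in */
}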
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index def500776b16..a70cd216be5d 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h | |||
@@ -36,19 +36,6 @@ | |||
36 | #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) | 36 | #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) |
37 | #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) | 37 | #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) |
38 | 38 | ||
39 | /* Non HT mask */ | ||
40 | #define P4_ESCR_MASK \ | ||
41 | (P4_ESCR_EVENT_MASK | \ | ||
42 | P4_ESCR_EVENTMASK_MASK | \ | ||
43 | P4_ESCR_TAG_MASK | \ | ||
44 | P4_ESCR_TAG_ENABLE | \ | ||
45 | P4_ESCR_T0_OS | \ | ||
46 | P4_ESCR_T0_USR) | ||
47 | |||
48 | /* HT mask */ | ||
49 | #define P4_ESCR_MASK_HT \ | ||
50 | (P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR) | ||
51 | |||
52 | #define P4_CCCR_OVF 0x80000000U | 39 | #define P4_CCCR_OVF 0x80000000U |
53 | #define P4_CCCR_CASCADE 0x40000000U | 40 | #define P4_CCCR_CASCADE 0x40000000U |
54 | #define P4_CCCR_OVF_PMI_T0 0x04000000U | 41 | #define P4_CCCR_OVF_PMI_T0 0x04000000U |
@@ -70,23 +57,6 @@ | |||
70 | #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) | 57 | #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) |
71 | #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) | 58 | #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) |
72 | 59 | ||
73 | /* Non HT mask */ | ||
74 | #define P4_CCCR_MASK \ | ||
75 | (P4_CCCR_OVF | \ | ||
76 | P4_CCCR_CASCADE | \ | ||
77 | P4_CCCR_OVF_PMI_T0 | \ | ||
78 | P4_CCCR_FORCE_OVF | \ | ||
79 | P4_CCCR_EDGE | \ | ||
80 | P4_CCCR_THRESHOLD_MASK | \ | ||
81 | P4_CCCR_COMPLEMENT | \ | ||
82 | P4_CCCR_COMPARE | \ | ||
83 | P4_CCCR_ESCR_SELECT_MASK | \ | ||
84 | P4_CCCR_ENABLE) | ||
85 | |||
86 | /* HT mask */ | ||
87 | #define P4_CCCR_MASK_HT \ | ||
88 | (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY) | ||
89 | |||
90 | #define P4_GEN_ESCR_EMASK(class, name, bit) \ | 60 | #define P4_GEN_ESCR_EMASK(class, name, bit) \ |
91 | class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) | 61 | class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) |
92 | #define P4_ESCR_EMASK_BIT(class, name) class##__##name | 62 | #define P4_ESCR_EMASK_BIT(class, name) class##__##name |
@@ -127,6 +97,28 @@ | |||
127 | #define P4_CONFIG_HT_SHIFT 63 | 97 | #define P4_CONFIG_HT_SHIFT 63 |
128 | #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) | 98 | #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) |
129 | 99 | ||
100 | /* | ||
101 | * The bits we allow to pass for RAW events | ||
102 | */ | ||
103 | #define P4_CONFIG_MASK_ESCR \ | ||
104 | P4_ESCR_EVENT_MASK | \ | ||
105 | P4_ESCR_EVENTMASK_MASK | \ | ||
106 | P4_ESCR_TAG_MASK | \ | ||
107 | P4_ESCR_TAG_ENABLE | ||
108 | |||
109 | #define P4_CONFIG_MASK_CCCR \ | ||
110 | P4_CCCR_EDGE | \ | ||
111 | P4_CCCR_THRESHOLD_MASK | \ | ||
112 | P4_CCCR_COMPLEMENT | \ | ||
113 | P4_CCCR_COMPARE | \ | ||
114 | P4_CCCR_THREAD_ANY | \ | ||
115 | P4_CCCR_RESERVED | ||
116 | |||
117 | /* some dangerous bits are reserved for kernel internals */ | ||
118 | #define P4_CONFIG_MASK \ | ||
119 | (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \ | ||
120 | (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR)) | ||
121 | |||
130 | static inline bool p4_is_event_cascaded(u64 config) | 122 | static inline bool p4_is_event_cascaded(u64 config) |
131 | { | 123 | { |
132 | u32 cccr = p4_config_unpack_cccr(config); | 124 | u32 cccr = p4_config_unpack_cccr(config); |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index fedf32a8c3ec..9d3f485e5dd0 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -34,7 +34,7 @@ GCOV_PROFILE_paravirt.o := n | |||
34 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o | 34 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o |
35 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | 35 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
36 | obj-y += time.o ioport.o ldt.o dumpstack.o | 36 | obj-y += time.o ioport.o ldt.o dumpstack.o |
37 | obj-y += setup.o x86_init.o i8259.o irqinit.o | 37 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o |
38 | obj-$(CONFIG_X86_VISWS) += visws_quirks.o | 38 | obj-$(CONFIG_X86_VISWS) += visws_quirks.o |
39 | obj-$(CONFIG_X86_32) += probe_roms_32.o | 39 | obj-$(CONFIG_X86_32) += probe_roms_32.o |
40 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | 40 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o |
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index fb7a5f052e2b..fb16f17e59be 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c | |||
@@ -61,7 +61,7 @@ struct cstate_entry { | |||
61 | unsigned int ecx; | 61 | unsigned int ecx; |
62 | } states[ACPI_PROCESSOR_MAX_POWER]; | 62 | } states[ACPI_PROCESSOR_MAX_POWER]; |
63 | }; | 63 | }; |
64 | static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ | 64 | static struct cstate_entry __percpu *cpu_cstate_entry; /* per CPU ptr */ |
65 | 65 | ||
66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; | 66 | static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; |
67 | 67 | ||
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index f65ab8b014c4..a36bb90aef53 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -195,7 +195,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) | |||
195 | 195 | ||
196 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; | 196 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
197 | extern s32 __smp_locks[], __smp_locks_end[]; | 197 | extern s32 __smp_locks[], __smp_locks_end[]; |
198 | static void *text_poke_early(void *addr, const void *opcode, size_t len); | 198 | void *text_poke_early(void *addr, const void *opcode, size_t len); |
199 | 199 | ||
200 | /* Replace instructions with better alternatives for this CPU type. | 200 | /* Replace instructions with better alternatives for this CPU type. |
201 | This runs before SMP is initialized to avoid SMP problems with | 201 | This runs before SMP is initialized to avoid SMP problems with |
@@ -522,7 +522,7 @@ void __init alternative_instructions(void) | |||
522 | * instructions. And on the local CPU you need to be protected again NMI or MCE | 522 | * instructions. And on the local CPU you need to be protected again NMI or MCE |
523 | * handlers seeing an inconsistent instruction while you patch. | 523 | * handlers seeing an inconsistent instruction while you patch. |
524 | */ | 524 | */ |
525 | static void *__init_or_module text_poke_early(void *addr, const void *opcode, | 525 | void *__init_or_module text_poke_early(void *addr, const void *opcode, |
526 | size_t len) | 526 | size_t len) |
527 | { | 527 | { |
528 | unsigned long flags; | 528 | unsigned long flags; |
@@ -637,7 +637,72 @@ void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) | |||
637 | tpp.len = len; | 637 | tpp.len = len; |
638 | atomic_set(&stop_machine_first, 1); | 638 | atomic_set(&stop_machine_first, 1); |
639 | wrote_text = 0; | 639 | wrote_text = 0; |
640 | stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | 640 | /* Use __stop_machine() because the caller already got online_cpus. */ |
641 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | ||
641 | return addr; | 642 | return addr; |
642 | } | 643 | } |
643 | 644 | ||
645 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | ||
646 | |||
647 | unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; | ||
648 | |||
649 | void __init arch_init_ideal_nop5(void) | ||
650 | { | ||
651 | extern const unsigned char ftrace_test_p6nop[]; | ||
652 | extern const unsigned char ftrace_test_nop5[]; | ||
653 | extern const unsigned char ftrace_test_jmp[]; | ||
654 | int faulted = 0; | ||
655 | |||
656 | /* | ||
657 | * There is no good nop for all x86 archs. | ||
658 | * We will default to using the P6_NOP5, but first we | ||
659 | * will test to make sure that the nop will actually | ||
660 | * work on this CPU. If it faults, we will then | ||
661 | * go to a lesser efficient 5 byte nop. If that fails | ||
662 | * we then just use a jmp as our nop. This isn't the most | ||
663 | * efficient nop, but we can not use a multi part nop | ||
664 | * since we would then risk being preempted in the middle | ||
665 | * of that nop, and if we enabled tracing then, it might | ||
666 | * cause a system crash. | ||
667 | * | ||
668 | * TODO: check the cpuid to determine the best nop. | ||
669 | */ | ||
670 | asm volatile ( | ||
671 | "ftrace_test_jmp:" | ||
672 | "jmp ftrace_test_p6nop\n" | ||
673 | "nop\n" | ||
674 | "nop\n" | ||
675 | "nop\n" /* 2 byte jmp + 3 bytes */ | ||
676 | "ftrace_test_p6nop:" | ||
677 | P6_NOP5 | ||
678 | "jmp 1f\n" | ||
679 | "ftrace_test_nop5:" | ||
680 | ".byte 0x66,0x66,0x66,0x66,0x90\n" | ||
681 | "1:" | ||
682 | ".section .fixup, \"ax\"\n" | ||
683 | "2: movl $1, %0\n" | ||
684 | " jmp ftrace_test_nop5\n" | ||
685 | "3: movl $2, %0\n" | ||
686 | " jmp 1b\n" | ||
687 | ".previous\n" | ||
688 | _ASM_EXTABLE(ftrace_test_p6nop, 2b) | ||
689 | _ASM_EXTABLE(ftrace_test_nop5, 3b) | ||
690 | : "=r"(faulted) : "0" (faulted)); | ||
691 | |||
692 | switch (faulted) { | ||
693 | case 0: | ||
694 | pr_info("converting mcount calls to 0f 1f 44 00 00\n"); | ||
695 | memcpy(ideal_nop5, ftrace_test_p6nop, IDEAL_NOP_SIZE_5); | ||
696 | break; | ||
697 | case 1: | ||
698 | pr_info("converting mcount calls to 66 66 66 66 90\n"); | ||
699 | memcpy(ideal_nop5, ftrace_test_nop5, IDEAL_NOP_SIZE_5); | ||
700 | break; | ||
701 | case 2: | ||
702 | pr_info("converting mcount calls to jmp . + 5\n"); | ||
703 | memcpy(ideal_nop5, ftrace_test_jmp, IDEAL_NOP_SIZE_5); | ||
704 | break; | ||
705 | } | ||
706 | |||
707 | } | ||
708 | #endif | ||
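arch_init_ideal_nop5() probes at boot which 5-byte nop this CPU will actually execute, and ideal_nop5 is then what ftrace and the new jump-label code copy over disabled call or jump sites; the switch to __stop_machine() in text_poke_smp() is explained by the in-line comment, since the caller already holds get_online_cpus(). A hedged sketch of a consumer, loosely modeled on the x86 jump-label patching added elsewhere in this merge (simplified, invented function name):

union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;		/* 0xe9: jmp rel32 */
		int offset;
	} __attribute__((packed));
};

static void patch_site(void *addr, void *target, bool enable)
{
	union jump_code_union code;

	if (enable) {
		code.jump = 0xe9;
		code.offset = (long)target -
			      ((long)addr + JUMP_LABEL_NOP_SIZE);
	} else {
		memcpy(code.code, ideal_nop5, JUMP_LABEL_NOP_SIZE);
	}
	/* cross-modifying write; now funnels through __stop_machine() */
	text_poke_smp(addr, &code, JUMP_LABEL_NOP_SIZE);
}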
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index fa044e1e30a2..679b6450382b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1953 | size_t size, | 1953 | size_t size, |
1954 | int dir) | 1954 | int dir) |
1955 | { | 1955 | { |
1956 | dma_addr_t flush_addr; | ||
1956 | dma_addr_t i, start; | 1957 | dma_addr_t i, start; |
1957 | unsigned int pages; | 1958 | unsigned int pages; |
1958 | 1959 | ||
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1960 | (dma_addr + size > dma_dom->aperture_size)) | 1961 | (dma_addr + size > dma_dom->aperture_size)) |
1961 | return; | 1962 | return; |
1962 | 1963 | ||
1964 | flush_addr = dma_addr; | ||
1963 | pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); | 1965 | pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); |
1964 | dma_addr &= PAGE_MASK; | 1966 | dma_addr &= PAGE_MASK; |
1965 | start = dma_addr; | 1967 | start = dma_addr; |
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, | |||
1974 | dma_ops_free_addresses(dma_dom, dma_addr, pages); | 1976 | dma_ops_free_addresses(dma_dom, dma_addr, pages); |
1975 | 1977 | ||
1976 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { | 1978 | if (amd_iommu_unmap_flush || dma_dom->need_flush) { |
1977 | iommu_flush_pages(&dma_dom->domain, dma_addr, size); | 1979 | iommu_flush_pages(&dma_dom->domain, flush_addr, size); |
1978 | dma_dom->need_flush = false; | 1980 | dma_dom->need_flush = false; |
1979 | } | 1981 | } |
1980 | } | 1982 | } |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 3cc63e2b8dd4..5a170cbbbed8 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) | |||
632 | iommu->last_device = calc_devid(MMIO_GET_BUS(range), | 632 | iommu->last_device = calc_devid(MMIO_GET_BUS(range), |
633 | MMIO_GET_LD(range)); | 633 | MMIO_GET_LD(range)); |
634 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); | 634 | iommu->evt_msi_num = MMIO_MSI_NUM(misc); |
635 | |||
636 | if (is_rd890_iommu(iommu->dev)) { | ||
637 | pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); | ||
638 | pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); | ||
639 | pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); | ||
640 | pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); | ||
641 | } | ||
635 | } | 642 | } |
636 | 643 | ||
637 | /* | 644 | /* |
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu, | |||
649 | struct ivhd_entry *e; | 656 | struct ivhd_entry *e; |
650 | 657 | ||
651 | /* | 658 | /* |
652 | * First set the recommended feature enable bits from ACPI | 659 | * First save the recommended feature enable bits from ACPI |
653 | * into the IOMMU control registers | ||
654 | */ | 660 | */ |
655 | h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? | 661 | iommu->acpi_flags = h->flags; |
656 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | ||
657 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | ||
658 | |||
659 | h->flags & IVHD_FLAG_PASSPW_EN_MASK ? | ||
660 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | ||
661 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | ||
662 | |||
663 | h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? | ||
664 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | ||
665 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | ||
666 | |||
667 | h->flags & IVHD_FLAG_ISOC_EN_MASK ? | ||
668 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | ||
669 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | ||
670 | |||
671 | /* | ||
672 | * make IOMMU memory accesses cache coherent | ||
673 | */ | ||
674 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | ||
675 | 662 | ||
676 | /* | 663 | /* |
677 | * Done. Now parse the device entries | 664 | * Done. Now parse the device entries |
@@ -1116,6 +1103,40 @@ static void init_device_table(void) | |||
1116 | } | 1103 | } |
1117 | } | 1104 | } |
1118 | 1105 | ||
1106 | static void iommu_init_flags(struct amd_iommu *iommu) | ||
1107 | { | ||
1108 | iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? | ||
1109 | iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : | ||
1110 | iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); | ||
1111 | |||
1112 | iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? | ||
1113 | iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : | ||
1114 | iommu_feature_disable(iommu, CONTROL_PASSPW_EN); | ||
1115 | |||
1116 | iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? | ||
1117 | iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : | ||
1118 | iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); | ||
1119 | |||
1120 | iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? | ||
1121 | iommu_feature_enable(iommu, CONTROL_ISOC_EN) : | ||
1122 | iommu_feature_disable(iommu, CONTROL_ISOC_EN); | ||
1123 | |||
1124 | /* | ||
1125 | * make IOMMU memory accesses cache coherent | ||
1126 | */ | ||
1127 | iommu_feature_enable(iommu, CONTROL_COHERENT_EN); | ||
1128 | } | ||
1129 | |||
1130 | static void iommu_apply_quirks(struct amd_iommu *iommu) | ||
1131 | { | ||
1132 | if (is_rd890_iommu(iommu->dev)) { | ||
1133 | pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); | ||
1134 | pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); | ||
1135 | pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); | ||
1136 | pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); | ||
1137 | } | ||
1138 | } | ||
1139 | |||
1119 | /* | 1140 | /* |
1120 | * This function finally enables all IOMMUs found in the system after | 1141 | * This function finally enables all IOMMUs found in the system after |
1121 | * they have been initialized | 1142 | * they have been initialized |
@@ -1126,6 +1147,8 @@ static void enable_iommus(void) | |||
1126 | 1147 | ||
1127 | for_each_iommu(iommu) { | 1148 | for_each_iommu(iommu) { |
1128 | iommu_disable(iommu); | 1149 | iommu_disable(iommu); |
1150 | iommu_apply_quirks(iommu); | ||
1151 | iommu_init_flags(iommu); | ||
1129 | iommu_set_device_table(iommu); | 1152 | iommu_set_device_table(iommu); |
1130 | iommu_enable_command_buffer(iommu); | 1153 | iommu_enable_command_buffer(iommu); |
1131 | iommu_enable_event_buffer(iommu); | 1154 | iommu_enable_event_buffer(iommu); |
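Two things happen in this file: the ACPI feature flags are now only recorded at parse time and applied later by iommu_init_flags(), so they are reapplied on every enable path, and on RD890 northbridges the config-space dwords at 0xf0-0xfc are saved once at probe and written back before each enable because the BIOS may not restore them after S3. The same save/restore pairing, condensed into loops purely for illustration (not the committed form):

static void rd890_save_cfg(struct amd_iommu *iommu)
{
	int i;

	for (i = 0; i < 4; i++)
		pci_read_config_dword(iommu->dev, 0xf0 + 4 * i,
				      &iommu->cache_cfg[i]);
}

static void rd890_restore_cfg(struct amd_iommu *iommu)
{
	int i;

	for (i = 0; i < 4; i++)
		pci_write_config_dword(iommu->dev, 0xf0 + 4 * i,
				       iommu->cache_cfg[i]);
}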
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f1efebaf5510..5c5b8f3dddb5 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, | |||
306 | 306 | ||
307 | old_cfg = old_desc->chip_data; | 307 | old_cfg = old_desc->chip_data; |
308 | 308 | ||
309 | memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); | 309 | cfg->vector = old_cfg->vector; |
310 | cfg->move_in_progress = old_cfg->move_in_progress; | ||
311 | cpumask_copy(cfg->domain, old_cfg->domain); | ||
312 | cpumask_copy(cfg->old_domain, old_cfg->old_domain); | ||
310 | 313 | ||
311 | init_copy_irq_2_pin(old_cfg, cfg, node); | 314 | init_copy_irq_2_pin(old_cfg, cfg, node); |
312 | } | 315 | } |
313 | 316 | ||
314 | static void free_irq_cfg(struct irq_cfg *old_cfg) | 317 | static void free_irq_cfg(struct irq_cfg *cfg) |
315 | { | 318 | { |
316 | kfree(old_cfg); | 319 | free_cpumask_var(cfg->domain); |
320 | free_cpumask_var(cfg->old_domain); | ||
321 | kfree(cfg); | ||
317 | } | 322 | } |
318 | 323 | ||
319 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | 324 | void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) |
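The structure copy is replaced by per-field copies because domain and old_domain are cpumask_var_t in this kernel; with CONFIG_CPUMASK_OFFSTACK those are separately allocated masks, so a wholesale memcpy() would leak the new cfg's masks and alias the old ones, and free_irq_cfg() now has to release them explicitly. A small hedged sketch of the matching allocation side (illustrative name, not the io_apic code):

static struct irq_cfg *alloc_irq_cfg_sketch(int node)
{
	struct irq_cfg *cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);

	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return cfg;

out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}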
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 490dac63c2d2..f2f9ac7da25c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |||
545 | } | 545 | } |
546 | } | 546 | } |
547 | 547 | ||
548 | static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | 548 | void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) |
549 | { | 549 | { |
550 | u32 tfms, xlvl; | 550 | u32 tfms, xlvl; |
551 | u32 ebx; | 551 | u32 ebx; |
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 3624e8a0f71b..f668bb1f7d43 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h | |||
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], | |||
33 | *const __x86_cpu_dev_end[]; | 33 | *const __x86_cpu_dev_end[]; |
34 | 34 | ||
35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); | 35 | extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); |
36 | extern void get_cpu_cap(struct cpuinfo_x86 *c); | ||
36 | 37 | ||
37 | #endif | 38 | #endif |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 994230d4dc4e..4f6f679f2799 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
@@ -368,16 +368,22 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle) | |||
368 | return -ENODEV; | 368 | return -ENODEV; |
369 | 369 | ||
370 | out_obj = output.pointer; | 370 | out_obj = output.pointer; |
371 | if (out_obj->type != ACPI_TYPE_BUFFER) | 371 | if (out_obj->type != ACPI_TYPE_BUFFER) { |
372 | return -ENODEV; | 372 | ret = -ENODEV; |
373 | goto out_free; | ||
374 | } | ||
373 | 375 | ||
374 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); | 376 | errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); |
375 | if (errors) | 377 | if (errors) { |
376 | return -ENODEV; | 378 | ret = -ENODEV; |
379 | goto out_free; | ||
380 | } | ||
377 | 381 | ||
378 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); | 382 | supported = *((u32 *)(out_obj->buffer.pointer + 4)); |
379 | if (!(supported & 0x1)) | 383 | if (!(supported & 0x1)) { |
380 | return -ENODEV; | 384 | ret = -ENODEV; |
385 | goto out_free; | ||
386 | } | ||
381 | 387 | ||
382 | out_free: | 388 | out_free: |
383 | kfree(output.pointer); | 389 | kfree(output.pointer); |
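The three early returns all fired after the ACPI evaluation had handed back an allocated output buffer, so each of them leaked output.pointer; routing every failure through out_free keeps a single kfree(). The general shape of the fix, as a hedged sketch with invented helpers:

int probe_sketch(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);
	int ret = 0;

	if (!buf)
		return -ENOMEM;		/* nothing to free yet */

	if (!buffer_looks_valid(buf)) {	/* hypothetical check */
		ret = -ENODEV;
		goto out_free;		/* was: return -ENODEV (leak) */
	}

	consume_buffer(buf);		/* hypothetical use */

out_free:
	kfree(buf);
	return ret;
}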
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 85f69cdeae10..b4389441efbb 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; | 39 | misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; |
40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); | 40 | wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); |
41 | c->cpuid_level = cpuid_eax(0); | 41 | c->cpuid_level = cpuid_eax(0); |
42 | get_cpu_cap(c); | ||
42 | } | 43 | } |
43 | } | 44 | } |
44 | 45 | ||
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 03a5b0385ad6..e2513f26ba8b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -531,7 +531,7 @@ static int x86_pmu_hw_config(struct perf_event *event) | |||
531 | /* | 531 | /* |
532 | * Setup the hardware configuration for a given attr_type | 532 | * Setup the hardware configuration for a given attr_type |
533 | */ | 533 | */ |
534 | static int __hw_perf_event_init(struct perf_event *event) | 534 | static int __x86_pmu_event_init(struct perf_event *event) |
535 | { | 535 | { |
536 | int err; | 536 | int err; |
537 | 537 | ||
@@ -584,7 +584,7 @@ static void x86_pmu_disable_all(void) | |||
584 | } | 584 | } |
585 | } | 585 | } |
586 | 586 | ||
587 | void hw_perf_disable(void) | 587 | static void x86_pmu_disable(struct pmu *pmu) |
588 | { | 588 | { |
589 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 589 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
590 | 590 | ||
@@ -619,7 +619,7 @@ static void x86_pmu_enable_all(int added) | |||
619 | } | 619 | } |
620 | } | 620 | } |
621 | 621 | ||
622 | static const struct pmu pmu; | 622 | static struct pmu pmu; |
623 | 623 | ||
624 | static inline int is_x86_event(struct perf_event *event) | 624 | static inline int is_x86_event(struct perf_event *event) |
625 | { | 625 | { |
@@ -801,10 +801,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, | |||
801 | hwc->last_tag == cpuc->tags[i]; | 801 | hwc->last_tag == cpuc->tags[i]; |
802 | } | 802 | } |
803 | 803 | ||
804 | static int x86_pmu_start(struct perf_event *event); | 804 | static void x86_pmu_start(struct perf_event *event, int flags); |
805 | static void x86_pmu_stop(struct perf_event *event); | 805 | static void x86_pmu_stop(struct perf_event *event, int flags); |
806 | 806 | ||
807 | void hw_perf_enable(void) | 807 | static void x86_pmu_enable(struct pmu *pmu) |
808 | { | 808 | { |
809 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 809 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
810 | struct perf_event *event; | 810 | struct perf_event *event; |
@@ -840,7 +840,14 @@ void hw_perf_enable(void) | |||
840 | match_prev_assignment(hwc, cpuc, i)) | 840 | match_prev_assignment(hwc, cpuc, i)) |
841 | continue; | 841 | continue; |
842 | 842 | ||
843 | x86_pmu_stop(event); | 843 | /* |
844 | * Ensure we don't accidentally enable a stopped | ||
845 | * counter simply because we rescheduled. | ||
846 | */ | ||
847 | if (hwc->state & PERF_HES_STOPPED) | ||
848 | hwc->state |= PERF_HES_ARCH; | ||
849 | |||
850 | x86_pmu_stop(event, PERF_EF_UPDATE); | ||
844 | } | 851 | } |
845 | 852 | ||
846 | for (i = 0; i < cpuc->n_events; i++) { | 853 | for (i = 0; i < cpuc->n_events; i++) { |
@@ -852,7 +859,10 @@ void hw_perf_enable(void) | |||
852 | else if (i < n_running) | 859 | else if (i < n_running) |
853 | continue; | 860 | continue; |
854 | 861 | ||
855 | x86_pmu_start(event); | 862 | if (hwc->state & PERF_HES_ARCH) |
863 | continue; | ||
864 | |||
865 | x86_pmu_start(event, PERF_EF_RELOAD); | ||
856 | } | 866 | } |
857 | cpuc->n_added = 0; | 867 | cpuc->n_added = 0; |
858 | perf_events_lapic_init(); | 868 | perf_events_lapic_init(); |
@@ -953,15 +963,12 @@ static void x86_pmu_enable_event(struct perf_event *event) | |||
953 | } | 963 | } |
954 | 964 | ||
955 | /* | 965 | /* |
956 | * activate a single event | 966 | * Add a single event to the PMU. |
957 | * | 967 | * |
958 | * The event is added to the group of enabled events | 968 | * The event is added to the group of enabled events |
959 | * but only if it can be scehduled with existing events. | 969 | * but only if it can be scehduled with existing events. |
960 | * | ||
961 | * Called with PMU disabled. If successful and return value 1, | ||
962 | * then guaranteed to call perf_enable() and hw_perf_enable() | ||
963 | */ | 970 | */ |
964 | static int x86_pmu_enable(struct perf_event *event) | 971 | static int x86_pmu_add(struct perf_event *event, int flags) |
965 | { | 972 | { |
966 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 973 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
967 | struct hw_perf_event *hwc; | 974 | struct hw_perf_event *hwc; |
@@ -970,58 +977,67 @@ static int x86_pmu_enable(struct perf_event *event) | |||
970 | 977 | ||
971 | hwc = &event->hw; | 978 | hwc = &event->hw; |
972 | 979 | ||
980 | perf_pmu_disable(event->pmu); | ||
973 | n0 = cpuc->n_events; | 981 | n0 = cpuc->n_events; |
974 | n = collect_events(cpuc, event, false); | 982 | ret = n = collect_events(cpuc, event, false); |
975 | if (n < 0) | 983 | if (ret < 0) |
976 | return n; | 984 | goto out; |
985 | |||
986 | hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | ||
987 | if (!(flags & PERF_EF_START)) | ||
988 | hwc->state |= PERF_HES_ARCH; | ||
977 | 989 | ||
978 | /* | 990 | /* |
979 | * If group events scheduling transaction was started, | 991 | * If group events scheduling transaction was started, |
980 | * skip the schedulability test here, it will be peformed | 992 | * skip the schedulability test here, it will be peformed |
981 | * at commit time(->commit_txn) as a whole | 993 | * at commit time (->commit_txn) as a whole |
982 | */ | 994 | */ |
983 | if (cpuc->group_flag & PERF_EVENT_TXN) | 995 | if (cpuc->group_flag & PERF_EVENT_TXN) |
984 | goto out; | 996 | goto done_collect; |
985 | 997 | ||
986 | ret = x86_pmu.schedule_events(cpuc, n, assign); | 998 | ret = x86_pmu.schedule_events(cpuc, n, assign); |
987 | if (ret) | 999 | if (ret) |
988 | return ret; | 1000 | goto out; |
989 | /* | 1001 | /* |
990 | * copy new assignment, now we know it is possible | 1002 | * copy new assignment, now we know it is possible |
991 | * will be used by hw_perf_enable() | 1003 | * will be used by hw_perf_enable() |
992 | */ | 1004 | */ |
993 | memcpy(cpuc->assign, assign, n*sizeof(int)); | 1005 | memcpy(cpuc->assign, assign, n*sizeof(int)); |
994 | 1006 | ||
995 | out: | 1007 | done_collect: |
996 | cpuc->n_events = n; | 1008 | cpuc->n_events = n; |
997 | cpuc->n_added += n - n0; | 1009 | cpuc->n_added += n - n0; |
998 | cpuc->n_txn += n - n0; | 1010 | cpuc->n_txn += n - n0; |
999 | 1011 | ||
1000 | return 0; | 1012 | ret = 0; |
1013 | out: | ||
1014 | perf_pmu_enable(event->pmu); | ||
1015 | return ret; | ||
1001 | } | 1016 | } |
1002 | 1017 | ||
1003 | static int x86_pmu_start(struct perf_event *event) | 1018 | static void x86_pmu_start(struct perf_event *event, int flags) |
1004 | { | 1019 | { |
1005 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1020 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1006 | int idx = event->hw.idx; | 1021 | int idx = event->hw.idx; |
1007 | 1022 | ||
1008 | if (idx == -1) | 1023 | if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) |
1009 | return -EAGAIN; | 1024 | return; |
1025 | |||
1026 | if (WARN_ON_ONCE(idx == -1)) | ||
1027 | return; | ||
1028 | |||
1029 | if (flags & PERF_EF_RELOAD) { | ||
1030 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
1031 | x86_perf_event_set_period(event); | ||
1032 | } | ||
1033 | |||
1034 | event->hw.state = 0; | ||
1010 | 1035 | ||
1011 | x86_perf_event_set_period(event); | ||
1012 | cpuc->events[idx] = event; | 1036 | cpuc->events[idx] = event; |
1013 | __set_bit(idx, cpuc->active_mask); | 1037 | __set_bit(idx, cpuc->active_mask); |
1014 | __set_bit(idx, cpuc->running); | 1038 | __set_bit(idx, cpuc->running); |
1015 | x86_pmu.enable(event); | 1039 | x86_pmu.enable(event); |
1016 | perf_event_update_userpage(event); | 1040 | perf_event_update_userpage(event); |
1017 | |||
1018 | return 0; | ||
1019 | } | ||
1020 | |||
1021 | static void x86_pmu_unthrottle(struct perf_event *event) | ||
1022 | { | ||
1023 | int ret = x86_pmu_start(event); | ||
1024 | WARN_ON_ONCE(ret); | ||
1025 | } | 1041 | } |
1026 | 1042 | ||
1027 | void perf_event_print_debug(void) | 1043 | void perf_event_print_debug(void) |
@@ -1078,27 +1094,29 @@ void perf_event_print_debug(void) | |||
1078 | local_irq_restore(flags); | 1094 | local_irq_restore(flags); |
1079 | } | 1095 | } |
1080 | 1096 | ||
1081 | static void x86_pmu_stop(struct perf_event *event) | 1097 | static void x86_pmu_stop(struct perf_event *event, int flags) |
1082 | { | 1098 | { |
1083 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1099 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1084 | struct hw_perf_event *hwc = &event->hw; | 1100 | struct hw_perf_event *hwc = &event->hw; |
1085 | int idx = hwc->idx; | ||
1086 | 1101 | ||
1087 | if (!__test_and_clear_bit(idx, cpuc->active_mask)) | 1102 | if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) { |
1088 | return; | 1103 | x86_pmu.disable(event); |
1089 | 1104 | cpuc->events[hwc->idx] = NULL; | |
1090 | x86_pmu.disable(event); | 1105 | WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); |
1091 | 1106 | hwc->state |= PERF_HES_STOPPED; | |
1092 | /* | 1107 | } |
1093 | * Drain the remaining delta count out of a event | ||
1094 | * that we are disabling: | ||
1095 | */ | ||
1096 | x86_perf_event_update(event); | ||
1097 | 1108 | ||
1098 | cpuc->events[idx] = NULL; | 1109 | if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { |
1110 | /* | ||
1111 | * Drain the remaining delta count out of a event | ||
1112 | * that we are disabling: | ||
1113 | */ | ||
1114 | x86_perf_event_update(event); | ||
1115 | hwc->state |= PERF_HES_UPTODATE; | ||
1116 | } | ||
1099 | } | 1117 | } |
1100 | 1118 | ||
1101 | static void x86_pmu_disable(struct perf_event *event) | 1119 | static void x86_pmu_del(struct perf_event *event, int flags) |
1102 | { | 1120 | { |
1103 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1121 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1104 | int i; | 1122 | int i; |
@@ -1111,7 +1129,7 @@ static void x86_pmu_disable(struct perf_event *event) | |||
1111 | if (cpuc->group_flag & PERF_EVENT_TXN) | 1129 | if (cpuc->group_flag & PERF_EVENT_TXN) |
1112 | return; | 1130 | return; |
1113 | 1131 | ||
1114 | x86_pmu_stop(event); | 1132 | x86_pmu_stop(event, PERF_EF_UPDATE); |
1115 | 1133 | ||
1116 | for (i = 0; i < cpuc->n_events; i++) { | 1134 | for (i = 0; i < cpuc->n_events; i++) { |
1117 | if (event == cpuc->event_list[i]) { | 1135 | if (event == cpuc->event_list[i]) { |
@@ -1134,7 +1152,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1134 | struct perf_sample_data data; | 1152 | struct perf_sample_data data; |
1135 | struct cpu_hw_events *cpuc; | 1153 | struct cpu_hw_events *cpuc; |
1136 | struct perf_event *event; | 1154 | struct perf_event *event; |
1137 | struct hw_perf_event *hwc; | ||
1138 | int idx, handled = 0; | 1155 | int idx, handled = 0; |
1139 | u64 val; | 1156 | u64 val; |
1140 | 1157 | ||
@@ -1155,7 +1172,6 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1155 | } | 1172 | } |
1156 | 1173 | ||
1157 | event = cpuc->events[idx]; | 1174 | event = cpuc->events[idx]; |
1158 | hwc = &event->hw; | ||
1159 | 1175 | ||
1160 | val = x86_perf_event_update(event); | 1176 | val = x86_perf_event_update(event); |
1161 | if (val & (1ULL << (x86_pmu.cntval_bits - 1))) | 1177 | if (val & (1ULL << (x86_pmu.cntval_bits - 1))) |
@@ -1171,7 +1187,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1171 | continue; | 1187 | continue; |
1172 | 1188 | ||
1173 | if (perf_event_overflow(event, 1, &data, regs)) | 1189 | if (perf_event_overflow(event, 1, &data, regs)) |
1174 | x86_pmu_stop(event); | 1190 | x86_pmu_stop(event, 0); |
1175 | } | 1191 | } |
1176 | 1192 | ||
1177 | if (handled) | 1193 | if (handled) |
@@ -1388,7 +1404,6 @@ void __init init_hw_perf_events(void) | |||
1388 | x86_pmu.num_counters = X86_PMC_MAX_GENERIC; | 1404 | x86_pmu.num_counters = X86_PMC_MAX_GENERIC; |
1389 | } | 1405 | } |
1390 | x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; | 1406 | x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; |
1391 | perf_max_events = x86_pmu.num_counters; | ||
1392 | 1407 | ||
1393 | if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { | 1408 | if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { |
1394 | WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", | 1409 | WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", |
@@ -1424,6 +1439,7 @@ void __init init_hw_perf_events(void) | |||
1424 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); | 1439 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); |
1425 | pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); | 1440 | pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl); |
1426 | 1441 | ||
1442 | perf_pmu_register(&pmu); | ||
1427 | perf_cpu_notifier(x86_pmu_notifier); | 1443 | perf_cpu_notifier(x86_pmu_notifier); |
1428 | } | 1444 | } |
1429 | 1445 | ||
@@ -1437,10 +1453,11 @@ static inline void x86_pmu_read(struct perf_event *event) | |||
1437 | * Set the flag to make pmu::enable() not perform the | 1453 | * Set the flag to make pmu::enable() not perform the |
1438 | * schedulability test, it will be performed at commit time | 1454 | * schedulability test, it will be performed at commit time |
1439 | */ | 1455 | */ |
1440 | static void x86_pmu_start_txn(const struct pmu *pmu) | 1456 | static void x86_pmu_start_txn(struct pmu *pmu) |
1441 | { | 1457 | { |
1442 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1458 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1443 | 1459 | ||
1460 | perf_pmu_disable(pmu); | ||
1444 | cpuc->group_flag |= PERF_EVENT_TXN; | 1461 | cpuc->group_flag |= PERF_EVENT_TXN; |
1445 | cpuc->n_txn = 0; | 1462 | cpuc->n_txn = 0; |
1446 | } | 1463 | } |
@@ -1450,7 +1467,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu) | |||
1450 | * Clear the flag and pmu::enable() will perform the | 1467 | * Clear the flag and pmu::enable() will perform the |
1451 | * schedulability test. | 1468 | * schedulability test. |
1452 | */ | 1469 | */ |
1453 | static void x86_pmu_cancel_txn(const struct pmu *pmu) | 1470 | static void x86_pmu_cancel_txn(struct pmu *pmu) |
1454 | { | 1471 | { |
1455 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1472 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1456 | 1473 | ||
@@ -1460,6 +1477,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu) | |||
1460 | */ | 1477 | */ |
1461 | cpuc->n_added -= cpuc->n_txn; | 1478 | cpuc->n_added -= cpuc->n_txn; |
1462 | cpuc->n_events -= cpuc->n_txn; | 1479 | cpuc->n_events -= cpuc->n_txn; |
1480 | perf_pmu_enable(pmu); | ||
1463 | } | 1481 | } |
1464 | 1482 | ||
1465 | /* | 1483 | /* |
@@ -1467,7 +1485,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu) | |||
1467 | * Perform the group schedulability test as a whole | 1485 | * Perform the group schedulability test as a whole |
1468 | * Return 0 if success | 1486 | * Return 0 if success |
1469 | */ | 1487 | */ |
1470 | static int x86_pmu_commit_txn(const struct pmu *pmu) | 1488 | static int x86_pmu_commit_txn(struct pmu *pmu) |
1471 | { | 1489 | { |
1472 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 1490 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
1473 | int assign[X86_PMC_IDX_MAX]; | 1491 | int assign[X86_PMC_IDX_MAX]; |
@@ -1489,22 +1507,10 @@ static int x86_pmu_commit_txn(const struct pmu *pmu) | |||
1489 | memcpy(cpuc->assign, assign, n*sizeof(int)); | 1507 | memcpy(cpuc->assign, assign, n*sizeof(int)); |
1490 | 1508 | ||
1491 | cpuc->group_flag &= ~PERF_EVENT_TXN; | 1509 | cpuc->group_flag &= ~PERF_EVENT_TXN; |
1492 | 1510 | perf_pmu_enable(pmu); | |
1493 | return 0; | 1511 | return 0; |
1494 | } | 1512 | } |
1495 | 1513 | ||
1496 | static const struct pmu pmu = { | ||
1497 | .enable = x86_pmu_enable, | ||
1498 | .disable = x86_pmu_disable, | ||
1499 | .start = x86_pmu_start, | ||
1500 | .stop = x86_pmu_stop, | ||
1501 | .read = x86_pmu_read, | ||
1502 | .unthrottle = x86_pmu_unthrottle, | ||
1503 | .start_txn = x86_pmu_start_txn, | ||
1504 | .cancel_txn = x86_pmu_cancel_txn, | ||
1505 | .commit_txn = x86_pmu_commit_txn, | ||
1506 | }; | ||
1507 | |||
1508 | /* | 1514 | /* |
1509 | * validate that we can schedule this event | 1515 | * validate that we can schedule this event |
1510 | */ | 1516 | */ |
@@ -1579,12 +1585,22 @@ out: | |||
1579 | return ret; | 1585 | return ret; |
1580 | } | 1586 | } |
1581 | 1587 | ||
1582 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1588 | int x86_pmu_event_init(struct perf_event *event) |
1583 | { | 1589 | { |
1584 | const struct pmu *tmp; | 1590 | struct pmu *tmp; |
1585 | int err; | 1591 | int err; |
1586 | 1592 | ||
1587 | err = __hw_perf_event_init(event); | 1593 | switch (event->attr.type) { |
1594 | case PERF_TYPE_RAW: | ||
1595 | case PERF_TYPE_HARDWARE: | ||
1596 | case PERF_TYPE_HW_CACHE: | ||
1597 | break; | ||
1598 | |||
1599 | default: | ||
1600 | return -ENOENT; | ||
1601 | } | ||
1602 | |||
1603 | err = __x86_pmu_event_init(event); | ||
1588 | if (!err) { | 1604 | if (!err) { |
1589 | /* | 1605 | /* |
1590 | * we temporarily connect event to its pmu | 1606 | * we temporarily connect event to its pmu |
@@ -1604,26 +1620,31 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1604 | if (err) { | 1620 | if (err) { |
1605 | if (event->destroy) | 1621 | if (event->destroy) |
1606 | event->destroy(event); | 1622 | event->destroy(event); |
1607 | return ERR_PTR(err); | ||
1608 | } | 1623 | } |
1609 | 1624 | ||
1610 | return &pmu; | 1625 | return err; |
1611 | } | 1626 | } |
1612 | 1627 | ||
1613 | /* | 1628 | static struct pmu pmu = { |
1614 | * callchain support | 1629 | .pmu_enable = x86_pmu_enable, |
1615 | */ | 1630 | .pmu_disable = x86_pmu_disable, |
1616 | 1631 | ||
1617 | static inline | 1632 | .event_init = x86_pmu_event_init, |
1618 | void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1619 | { | ||
1620 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
1621 | entry->ip[entry->nr++] = ip; | ||
1622 | } | ||
1623 | 1633 | ||
1624 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | 1634 | .add = x86_pmu_add, |
1625 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); | 1635 | .del = x86_pmu_del, |
1636 | .start = x86_pmu_start, | ||
1637 | .stop = x86_pmu_stop, | ||
1638 | .read = x86_pmu_read, | ||
1626 | 1639 | ||
1640 | .start_txn = x86_pmu_start_txn, | ||
1641 | .cancel_txn = x86_pmu_cancel_txn, | ||
1642 | .commit_txn = x86_pmu_commit_txn, | ||
1643 | }; | ||
1644 | |||
1645 | /* | ||
1646 | * callchain support | ||
1647 | */ | ||
1627 | 1648 | ||
1628 | static void | 1649 | static void |
1629 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) | 1650 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) |
@@ -1645,7 +1666,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
1645 | { | 1666 | { |
1646 | struct perf_callchain_entry *entry = data; | 1667 | struct perf_callchain_entry *entry = data; |
1647 | 1668 | ||
1648 | callchain_store(entry, addr); | 1669 | perf_callchain_store(entry, addr); |
1649 | } | 1670 | } |
1650 | 1671 | ||
1651 | static const struct stacktrace_ops backtrace_ops = { | 1672 | static const struct stacktrace_ops backtrace_ops = { |
@@ -1656,11 +1677,15 @@ static const struct stacktrace_ops backtrace_ops = { | |||
1656 | .walk_stack = print_context_stack_bp, | 1677 | .walk_stack = print_context_stack_bp, |
1657 | }; | 1678 | }; |
1658 | 1679 | ||
1659 | static void | 1680 | void |
1660 | perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) | 1681 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) |
1661 | { | 1682 | { |
1662 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 1683 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { |
1663 | callchain_store(entry, regs->ip); | 1684 | /* TODO: We don't support guest os callchain now */ |
1685 | return; | ||
1686 | } | ||
1687 | |||
1688 | perf_callchain_store(entry, regs->ip); | ||
1664 | 1689 | ||
1665 | dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); | 1690 | dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); |
1666 | } | 1691 | } |
@@ -1689,7 +1714,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1689 | if (fp < compat_ptr(regs->sp)) | 1714 | if (fp < compat_ptr(regs->sp)) |
1690 | break; | 1715 | break; |
1691 | 1716 | ||
1692 | callchain_store(entry, frame.return_address); | 1717 | perf_callchain_store(entry, frame.return_address); |
1693 | fp = compat_ptr(frame.next_frame); | 1718 | fp = compat_ptr(frame.next_frame); |
1694 | } | 1719 | } |
1695 | return 1; | 1720 | return 1; |
@@ -1702,19 +1727,20 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1702 | } | 1727 | } |
1703 | #endif | 1728 | #endif |
1704 | 1729 | ||
1705 | static void | 1730 | void |
1706 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | 1731 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
1707 | { | 1732 | { |
1708 | struct stack_frame frame; | 1733 | struct stack_frame frame; |
1709 | const void __user *fp; | 1734 | const void __user *fp; |
1710 | 1735 | ||
1711 | if (!user_mode(regs)) | 1736 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { |
1712 | regs = task_pt_regs(current); | 1737 | /* TODO: We don't support guest os callchain now */ |
1738 | return; | ||
1739 | } | ||
1713 | 1740 | ||
1714 | fp = (void __user *)regs->bp; | 1741 | fp = (void __user *)regs->bp; |
1715 | 1742 | ||
1716 | callchain_store(entry, PERF_CONTEXT_USER); | 1743 | perf_callchain_store(entry, regs->ip); |
1717 | callchain_store(entry, regs->ip); | ||
1718 | 1744 | ||
1719 | if (perf_callchain_user32(regs, entry)) | 1745 | if (perf_callchain_user32(regs, entry)) |
1720 | return; | 1746 | return; |
@@ -1731,52 +1757,11 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1731 | if ((unsigned long)fp < regs->sp) | 1757 | if ((unsigned long)fp < regs->sp) |
1732 | break; | 1758 | break; |
1733 | 1759 | ||
1734 | callchain_store(entry, frame.return_address); | 1760 | perf_callchain_store(entry, frame.return_address); |
1735 | fp = frame.next_frame; | 1761 | fp = frame.next_frame; |
1736 | } | 1762 | } |
1737 | } | 1763 | } |
1738 | 1764 | ||
1739 | static void | ||
1740 | perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1741 | { | ||
1742 | int is_user; | ||
1743 | |||
1744 | if (!regs) | ||
1745 | return; | ||
1746 | |||
1747 | is_user = user_mode(regs); | ||
1748 | |||
1749 | if (is_user && current->state != TASK_RUNNING) | ||
1750 | return; | ||
1751 | |||
1752 | if (!is_user) | ||
1753 | perf_callchain_kernel(regs, entry); | ||
1754 | |||
1755 | if (current->mm) | ||
1756 | perf_callchain_user(regs, entry); | ||
1757 | } | ||
1758 | |||
1759 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
1760 | { | ||
1761 | struct perf_callchain_entry *entry; | ||
1762 | |||
1763 | if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { | ||
1764 | /* TODO: We don't support guest os callchain now */ | ||
1765 | return NULL; | ||
1766 | } | ||
1767 | |||
1768 | if (in_nmi()) | ||
1769 | entry = &__get_cpu_var(pmc_nmi_entry); | ||
1770 | else | ||
1771 | entry = &__get_cpu_var(pmc_irq_entry); | ||
1772 | |||
1773 | entry->nr = 0; | ||
1774 | |||
1775 | perf_do_callchain(regs, entry); | ||
1776 | |||
1777 | return entry; | ||
1778 | } | ||
1779 | |||
1780 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | 1765 | unsigned long perf_instruction_pointer(struct pt_regs *regs) |
1781 | { | 1766 | { |
1782 | unsigned long ip; | 1767 | unsigned long ip; |
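
The walker kept in perf_callchain_user() above follows the saved frame-pointer chain: each frame is a { next_frame, return_address } pair, the leaf IP is stored first via perf_callchain_store(), and the walk stops once a frame dips below the sampled stack pointer. A standalone userspace sketch of the same frame layout and walk (illustrative only; assumes x86-64 with frame pointers kept, e.g. gcc -O0, and walks its own stack instead of a sampled task's):

/* build: gcc -O0 -fno-omit-frame-pointer walk.c */
#include <stdio.h>
#include <stdint.h>

struct stack_frame {
	struct stack_frame *next_frame;	/* saved caller %rbp            */
	uintptr_t return_address;	/* return address above it      */
};

static void __attribute__((noinline)) walk(void)
{
	struct stack_frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && depth++ < 16) {
		printf("frame %2d: return address %p\n",
		       depth, (void *)fp->return_address);
		fp = fp->next_frame;	/* follow the chain upward */
	}
}

static void __attribute__((noinline)) b(void) { walk(); }
static void __attribute__((noinline)) a(void) { b(); }

int main(void)
{
	a();
	return 0;
}

In the kernel the frames live on the sampled task's user stack, so each one has to be copied in from user space rather than dereferenced directly as in this sketch.
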
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index ee05c90012d2..c8f5c088cad1 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -713,18 +713,18 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
713 | struct cpu_hw_events *cpuc; | 713 | struct cpu_hw_events *cpuc; |
714 | int bit, loops; | 714 | int bit, loops; |
715 | u64 status; | 715 | u64 status; |
716 | int handled = 0; | 716 | int handled; |
717 | 717 | ||
718 | perf_sample_data_init(&data, 0); | 718 | perf_sample_data_init(&data, 0); |
719 | 719 | ||
720 | cpuc = &__get_cpu_var(cpu_hw_events); | 720 | cpuc = &__get_cpu_var(cpu_hw_events); |
721 | 721 | ||
722 | intel_pmu_disable_all(); | 722 | intel_pmu_disable_all(); |
723 | intel_pmu_drain_bts_buffer(); | 723 | handled = intel_pmu_drain_bts_buffer(); |
724 | status = intel_pmu_get_status(); | 724 | status = intel_pmu_get_status(); |
725 | if (!status) { | 725 | if (!status) { |
726 | intel_pmu_enable_all(0); | 726 | intel_pmu_enable_all(0); |
727 | return 0; | 727 | return handled; |
728 | } | 728 | } |
729 | 729 | ||
730 | loops = 0; | 730 | loops = 0; |
@@ -763,7 +763,7 @@ again: | |||
763 | data.period = event->hw.last_period; | 763 | data.period = event->hw.last_period; |
764 | 764 | ||
765 | if (perf_event_overflow(event, 1, &data, regs)) | 765 | if (perf_event_overflow(event, 1, &data, regs)) |
766 | x86_pmu_stop(event); | 766 | x86_pmu_stop(event, 0); |
767 | } | 767 | } |
768 | 768 | ||
769 | /* | 769 | /* |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 18018d1311cd..4977f9c400e5 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -214,7 +214,7 @@ static void intel_pmu_disable_bts(void) | |||
214 | update_debugctlmsr(debugctlmsr); | 214 | update_debugctlmsr(debugctlmsr); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void intel_pmu_drain_bts_buffer(void) | 217 | static int intel_pmu_drain_bts_buffer(void) |
218 | { | 218 | { |
219 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 219 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
220 | struct debug_store *ds = cpuc->ds; | 220 | struct debug_store *ds = cpuc->ds; |
@@ -231,16 +231,16 @@ static void intel_pmu_drain_bts_buffer(void) | |||
231 | struct pt_regs regs; | 231 | struct pt_regs regs; |
232 | 232 | ||
233 | if (!event) | 233 | if (!event) |
234 | return; | 234 | return 0; |
235 | 235 | ||
236 | if (!ds) | 236 | if (!ds) |
237 | return; | 237 | return 0; |
238 | 238 | ||
239 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | 239 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; |
240 | top = (struct bts_record *)(unsigned long)ds->bts_index; | 240 | top = (struct bts_record *)(unsigned long)ds->bts_index; |
241 | 241 | ||
242 | if (top <= at) | 242 | if (top <= at) |
243 | return; | 243 | return 0; |
244 | 244 | ||
245 | ds->bts_index = ds->bts_buffer_base; | 245 | ds->bts_index = ds->bts_buffer_base; |
246 | 246 | ||
@@ -256,7 +256,7 @@ static void intel_pmu_drain_bts_buffer(void) | |||
256 | perf_prepare_sample(&header, &data, event, ®s); | 256 | perf_prepare_sample(&header, &data, event, ®s); |
257 | 257 | ||
258 | if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) | 258 | if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) |
259 | return; | 259 | return 1; |
260 | 260 | ||
261 | for (; at < top; at++) { | 261 | for (; at < top; at++) { |
262 | data.ip = at->from; | 262 | data.ip = at->from; |
@@ -270,6 +270,7 @@ static void intel_pmu_drain_bts_buffer(void) | |||
270 | /* There's new data available. */ | 270 | /* There's new data available. */ |
271 | event->hw.interrupts++; | 271 | event->hw.interrupts++; |
272 | event->pending_kill = POLL_IN; | 272 | event->pending_kill = POLL_IN; |
273 | return 1; | ||
273 | } | 274 | } |
274 | 275 | ||
275 | /* | 276 | /* |
@@ -491,7 +492,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, | |||
491 | regs.flags &= ~PERF_EFLAGS_EXACT; | 492 | regs.flags &= ~PERF_EFLAGS_EXACT; |
492 | 493 | ||
493 | if (perf_event_overflow(event, 1, &data, ®s)) | 494 | if (perf_event_overflow(event, 1, &data, ®s)) |
494 | x86_pmu_stop(event); | 495 | x86_pmu_stop(event, 0); |
495 | } | 496 | } |
496 | 497 | ||
497 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) | 498 | static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) |
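
The thread running through both hunks above is NMI accounting: intel_pmu_drain_bts_buffer() now reports whether it flushed anything, and intel_pmu_handle_irq() returns that count even when no counter overflow is pending, so a PMI that only carried BTS data is claimed instead of surfacing as an unknown NMI. A toy userspace model of that pattern (function names invented for the demo, not kernel APIs):

#include <stdio.h>

/* stands in for intel_pmu_drain_bts_buffer(): 1 if it flushed anything */
static int drain_side_buffer(void)
{
	return 1;			/* pretend one BTS record was written out */
}

/* stands in for intel_pmu_get_status(): overflow bits, 0 means none */
static unsigned long read_overflow_status(void)
{
	return 0;
}

static int handle_irq(void)
{
	int handled = drain_side_buffer();

	if (!read_overflow_status()) {
		/* before the change this early return was a bare 0, and a
		 * PMI that only carried BTS data looked "unknown" */
		return handled;
	}

	/* ... per-counter overflow handling would bump handled here ... */
	return handled;
}

int main(void)
{
	printf("PMI claimed: %s\n", handle_irq() ? "yes" : "no");
	return 0;
}
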
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index b560db3305be..81400b93e694 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -18,6 +18,8 @@ | |||
18 | struct p4_event_bind { | 18 | struct p4_event_bind { |
19 | unsigned int opcode; /* Event code and ESCR selector */ | 19 | unsigned int opcode; /* Event code and ESCR selector */ |
20 | unsigned int escr_msr[2]; /* ESCR MSR for this event */ | 20 | unsigned int escr_msr[2]; /* ESCR MSR for this event */ |
21 | unsigned int escr_emask; /* valid ESCR EventMask bits */ | ||
22 | unsigned int shared; /* event is shared across threads */ | ||
21 | char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */ | 23 | char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */ |
22 | }; | 24 | }; |
23 | 25 | ||
@@ -66,231 +68,435 @@ static struct p4_event_bind p4_event_bind_map[] = { | |||
66 | [P4_EVENT_TC_DELIVER_MODE] = { | 68 | [P4_EVENT_TC_DELIVER_MODE] = { |
67 | .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE), | 69 | .opcode = P4_OPCODE(P4_EVENT_TC_DELIVER_MODE), |
68 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, | 70 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, |
71 | .escr_emask = | ||
72 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD) | | ||
73 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DB) | | ||
74 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DI) | | ||
75 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BD) | | ||
76 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BB) | | ||
77 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, BI) | | ||
78 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, ID), | ||
79 | .shared = 1, | ||
69 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 80 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
70 | }, | 81 | }, |
71 | [P4_EVENT_BPU_FETCH_REQUEST] = { | 82 | [P4_EVENT_BPU_FETCH_REQUEST] = { |
72 | .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST), | 83 | .opcode = P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST), |
73 | .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 }, | 84 | .escr_msr = { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 }, |
85 | .escr_emask = | ||
86 | P4_ESCR_EMASK_BIT(P4_EVENT_BPU_FETCH_REQUEST, TCMISS), | ||
74 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 87 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
75 | }, | 88 | }, |
76 | [P4_EVENT_ITLB_REFERENCE] = { | 89 | [P4_EVENT_ITLB_REFERENCE] = { |
77 | .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE), | 90 | .opcode = P4_OPCODE(P4_EVENT_ITLB_REFERENCE), |
78 | .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, | 91 | .escr_msr = { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 }, |
92 | .escr_emask = | ||
93 | P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT) | | ||
94 | P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, MISS) | | ||
95 | P4_ESCR_EMASK_BIT(P4_EVENT_ITLB_REFERENCE, HIT_UK), | ||
79 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 96 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
80 | }, | 97 | }, |
81 | [P4_EVENT_MEMORY_CANCEL] = { | 98 | [P4_EVENT_MEMORY_CANCEL] = { |
82 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL), | 99 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_CANCEL), |
83 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, | 100 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, |
101 | .escr_emask = | ||
102 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL) | | ||
103 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_CANCEL, 64K_CONF), | ||
84 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 104 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
85 | }, | 105 | }, |
86 | [P4_EVENT_MEMORY_COMPLETE] = { | 106 | [P4_EVENT_MEMORY_COMPLETE] = { |
87 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE), | 107 | .opcode = P4_OPCODE(P4_EVENT_MEMORY_COMPLETE), |
88 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, | 108 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, |
109 | .escr_emask = | ||
110 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, LSC) | | ||
111 | P4_ESCR_EMASK_BIT(P4_EVENT_MEMORY_COMPLETE, SSC), | ||
89 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 112 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
90 | }, | 113 | }, |
91 | [P4_EVENT_LOAD_PORT_REPLAY] = { | 114 | [P4_EVENT_LOAD_PORT_REPLAY] = { |
92 | .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY), | 115 | .opcode = P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY), |
93 | .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 }, | 116 | .escr_msr = { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 }, |
117 | .escr_emask = | ||
118 | P4_ESCR_EMASK_BIT(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD), | ||
94 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 119 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
95 | }, | 120 | }, |
96 | [P4_EVENT_STORE_PORT_REPLAY] = { | 121 | [P4_EVENT_STORE_PORT_REPLAY] = { |
97 | .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY), | 122 | .opcode = P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY), |
98 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, | 123 | .escr_msr = { MSR_P4_SAAT_ESCR0 , MSR_P4_SAAT_ESCR1 }, |
124 | .escr_emask = | ||
125 | P4_ESCR_EMASK_BIT(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST), | ||
99 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 126 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
100 | }, | 127 | }, |
101 | [P4_EVENT_MOB_LOAD_REPLAY] = { | 128 | [P4_EVENT_MOB_LOAD_REPLAY] = { |
102 | .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY), | 129 | .opcode = P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY), |
103 | .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 }, | 130 | .escr_msr = { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 }, |
131 | .escr_emask = | ||
132 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STA) | | ||
133 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, NO_STD) | | ||
134 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA) | | ||
135 | P4_ESCR_EMASK_BIT(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR), | ||
104 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 136 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
105 | }, | 137 | }, |
106 | [P4_EVENT_PAGE_WALK_TYPE] = { | 138 | [P4_EVENT_PAGE_WALK_TYPE] = { |
107 | .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE), | 139 | .opcode = P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE), |
108 | .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 }, | 140 | .escr_msr = { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 }, |
141 | .escr_emask = | ||
142 | P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, DTMISS) | | ||
143 | P4_ESCR_EMASK_BIT(P4_EVENT_PAGE_WALK_TYPE, ITMISS), | ||
144 | .shared = 1, | ||
109 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 145 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
110 | }, | 146 | }, |
111 | [P4_EVENT_BSQ_CACHE_REFERENCE] = { | 147 | [P4_EVENT_BSQ_CACHE_REFERENCE] = { |
112 | .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE), | 148 | .opcode = P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE), |
113 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, | 149 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 }, |
150 | .escr_emask = | ||
151 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS) | | ||
152 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE) | | ||
153 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM) | | ||
154 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS) | | ||
155 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE) | | ||
156 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM) | | ||
157 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS) | | ||
158 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS) | | ||
159 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS), | ||
114 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 160 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
115 | }, | 161 | }, |
116 | [P4_EVENT_IOQ_ALLOCATION] = { | 162 | [P4_EVENT_IOQ_ALLOCATION] = { |
117 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION), | 163 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ALLOCATION), |
118 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 164 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
165 | .escr_emask = | ||
166 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, DEFAULT) | | ||
167 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_READ) | | ||
168 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE) | | ||
169 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_UC) | | ||
170 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WC) | | ||
171 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WT) | | ||
172 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WP) | | ||
173 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, MEM_WB) | | ||
174 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OWN) | | ||
175 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, OTHER) | | ||
176 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ALLOCATION, PREFETCH), | ||
119 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 177 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
120 | }, | 178 | }, |
121 | [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */ | 179 | [P4_EVENT_IOQ_ACTIVE_ENTRIES] = { /* shared ESCR */ |
122 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES), | 180 | .opcode = P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES), |
123 | .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 }, | 181 | .escr_msr = { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 }, |
182 | .escr_emask = | ||
183 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT) | | ||
184 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ) | | ||
185 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE) | | ||
186 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC) | | ||
187 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC) | | ||
188 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT) | | ||
189 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP) | | ||
190 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB) | | ||
191 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN) | | ||
192 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER) | | ||
193 | P4_ESCR_EMASK_BIT(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH), | ||
124 | .cntr = { {2, -1, -1}, {3, -1, -1} }, | 194 | .cntr = { {2, -1, -1}, {3, -1, -1} }, |
125 | }, | 195 | }, |
126 | [P4_EVENT_FSB_DATA_ACTIVITY] = { | 196 | [P4_EVENT_FSB_DATA_ACTIVITY] = { |
127 | .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY), | 197 | .opcode = P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY), |
128 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 198 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
199 | .escr_emask = | ||
200 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV) | | ||
201 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN) | | ||
202 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER) | | ||
203 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV) | | ||
204 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN) | | ||
205 | P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER), | ||
206 | .shared = 1, | ||
129 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 207 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
130 | }, | 208 | }, |
131 | [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */ | 209 | [P4_EVENT_BSQ_ALLOCATION] = { /* shared ESCR, broken CCCR1 */ |
132 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION), | 210 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ALLOCATION), |
133 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 }, | 211 | .escr_msr = { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 }, |
212 | .escr_emask = | ||
213 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0) | | ||
214 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1) | | ||
215 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0) | | ||
216 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1) | | ||
217 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE) | | ||
218 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE) | | ||
219 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE) | | ||
220 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE) | | ||
221 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE) | | ||
222 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE) | | ||
223 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0) | | ||
224 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1) | | ||
225 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2), | ||
134 | .cntr = { {0, -1, -1}, {1, -1, -1} }, | 226 | .cntr = { {0, -1, -1}, {1, -1, -1} }, |
135 | }, | 227 | }, |
136 | [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */ | 228 | [P4_EVENT_BSQ_ACTIVE_ENTRIES] = { /* shared ESCR */ |
137 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES), | 229 | .opcode = P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES), |
138 | .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 }, | 230 | .escr_msr = { MSR_P4_BSU_ESCR1 , MSR_P4_BSU_ESCR1 }, |
231 | .escr_emask = | ||
232 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0) | | ||
233 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1) | | ||
234 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0) | | ||
235 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1) | | ||
236 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE) | | ||
237 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE) | | ||
238 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE) | | ||
239 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE) | | ||
240 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE) | | ||
241 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE) | | ||
242 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0) | | ||
243 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1) | | ||
244 | P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2), | ||
139 | .cntr = { {2, -1, -1}, {3, -1, -1} }, | 245 | .cntr = { {2, -1, -1}, {3, -1, -1} }, |
140 | }, | 246 | }, |
141 | [P4_EVENT_SSE_INPUT_ASSIST] = { | 247 | [P4_EVENT_SSE_INPUT_ASSIST] = { |
142 | .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST), | 248 | .opcode = P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST), |
143 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 249 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
250 | .escr_emask = | ||
251 | P4_ESCR_EMASK_BIT(P4_EVENT_SSE_INPUT_ASSIST, ALL), | ||
252 | .shared = 1, | ||
144 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 253 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
145 | }, | 254 | }, |
146 | [P4_EVENT_PACKED_SP_UOP] = { | 255 | [P4_EVENT_PACKED_SP_UOP] = { |
147 | .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP), | 256 | .opcode = P4_OPCODE(P4_EVENT_PACKED_SP_UOP), |
148 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 257 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
258 | .escr_emask = | ||
259 | P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_SP_UOP, ALL), | ||
260 | .shared = 1, | ||
149 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 261 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
150 | }, | 262 | }, |
151 | [P4_EVENT_PACKED_DP_UOP] = { | 263 | [P4_EVENT_PACKED_DP_UOP] = { |
152 | .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP), | 264 | .opcode = P4_OPCODE(P4_EVENT_PACKED_DP_UOP), |
153 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 265 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
266 | .escr_emask = | ||
267 | P4_ESCR_EMASK_BIT(P4_EVENT_PACKED_DP_UOP, ALL), | ||
268 | .shared = 1, | ||
154 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 269 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
155 | }, | 270 | }, |
156 | [P4_EVENT_SCALAR_SP_UOP] = { | 271 | [P4_EVENT_SCALAR_SP_UOP] = { |
157 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP), | 272 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_SP_UOP), |
158 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 273 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
274 | .escr_emask = | ||
275 | P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_SP_UOP, ALL), | ||
276 | .shared = 1, | ||
159 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 277 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
160 | }, | 278 | }, |
161 | [P4_EVENT_SCALAR_DP_UOP] = { | 279 | [P4_EVENT_SCALAR_DP_UOP] = { |
162 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP), | 280 | .opcode = P4_OPCODE(P4_EVENT_SCALAR_DP_UOP), |
163 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 281 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
282 | .escr_emask = | ||
283 | P4_ESCR_EMASK_BIT(P4_EVENT_SCALAR_DP_UOP, ALL), | ||
284 | .shared = 1, | ||
164 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 285 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
165 | }, | 286 | }, |
166 | [P4_EVENT_64BIT_MMX_UOP] = { | 287 | [P4_EVENT_64BIT_MMX_UOP] = { |
167 | .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP), | 288 | .opcode = P4_OPCODE(P4_EVENT_64BIT_MMX_UOP), |
168 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 289 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
290 | .escr_emask = | ||
291 | P4_ESCR_EMASK_BIT(P4_EVENT_64BIT_MMX_UOP, ALL), | ||
292 | .shared = 1, | ||
169 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 293 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
170 | }, | 294 | }, |
171 | [P4_EVENT_128BIT_MMX_UOP] = { | 295 | [P4_EVENT_128BIT_MMX_UOP] = { |
172 | .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP), | 296 | .opcode = P4_OPCODE(P4_EVENT_128BIT_MMX_UOP), |
173 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 297 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
298 | .escr_emask = | ||
299 | P4_ESCR_EMASK_BIT(P4_EVENT_128BIT_MMX_UOP, ALL), | ||
300 | .shared = 1, | ||
174 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 301 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
175 | }, | 302 | }, |
176 | [P4_EVENT_X87_FP_UOP] = { | 303 | [P4_EVENT_X87_FP_UOP] = { |
177 | .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP), | 304 | .opcode = P4_OPCODE(P4_EVENT_X87_FP_UOP), |
178 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, | 305 | .escr_msr = { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 }, |
306 | .escr_emask = | ||
307 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_FP_UOP, ALL), | ||
308 | .shared = 1, | ||
179 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 309 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
180 | }, | 310 | }, |
181 | [P4_EVENT_TC_MISC] = { | 311 | [P4_EVENT_TC_MISC] = { |
182 | .opcode = P4_OPCODE(P4_EVENT_TC_MISC), | 312 | .opcode = P4_OPCODE(P4_EVENT_TC_MISC), |
183 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, | 313 | .escr_msr = { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 }, |
314 | .escr_emask = | ||
315 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_MISC, FLUSH), | ||
184 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 316 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
185 | }, | 317 | }, |
186 | [P4_EVENT_GLOBAL_POWER_EVENTS] = { | 318 | [P4_EVENT_GLOBAL_POWER_EVENTS] = { |
187 | .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS), | 319 | .opcode = P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS), |
188 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 320 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
321 | .escr_emask = | ||
322 | P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING), | ||
189 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 323 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
190 | }, | 324 | }, |
191 | [P4_EVENT_TC_MS_XFER] = { | 325 | [P4_EVENT_TC_MS_XFER] = { |
192 | .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER), | 326 | .opcode = P4_OPCODE(P4_EVENT_TC_MS_XFER), |
193 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, | 327 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, |
328 | .escr_emask = | ||
329 | P4_ESCR_EMASK_BIT(P4_EVENT_TC_MS_XFER, CISC), | ||
194 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 330 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
195 | }, | 331 | }, |
196 | [P4_EVENT_UOP_QUEUE_WRITES] = { | 332 | [P4_EVENT_UOP_QUEUE_WRITES] = { |
197 | .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES), | 333 | .opcode = P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES), |
198 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, | 334 | .escr_msr = { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 }, |
335 | .escr_emask = | ||
336 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD) | | ||
337 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER) | | ||
338 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM), | ||
199 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 339 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
200 | }, | 340 | }, |
201 | [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = { | 341 | [P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = { |
202 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE), | 342 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE), |
203 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 }, | 343 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR0 }, |
344 | .escr_emask = | ||
345 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL) | | ||
346 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL) | | ||
347 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN) | | ||
348 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT), | ||
204 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 349 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
205 | }, | 350 | }, |
206 | [P4_EVENT_RETIRED_BRANCH_TYPE] = { | 351 | [P4_EVENT_RETIRED_BRANCH_TYPE] = { |
207 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE), | 352 | .opcode = P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE), |
208 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 }, | 353 | .escr_msr = { MSR_P4_TBPU_ESCR0 , MSR_P4_TBPU_ESCR1 }, |
354 | .escr_emask = | ||
355 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL) | | ||
356 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL) | | ||
357 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN) | | ||
358 | P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT), | ||
209 | .cntr = { {4, 5, -1}, {6, 7, -1} }, | 359 | .cntr = { {4, 5, -1}, {6, 7, -1} }, |
210 | }, | 360 | }, |
211 | [P4_EVENT_RESOURCE_STALL] = { | 361 | [P4_EVENT_RESOURCE_STALL] = { |
212 | .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL), | 362 | .opcode = P4_OPCODE(P4_EVENT_RESOURCE_STALL), |
213 | .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 }, | 363 | .escr_msr = { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 }, |
364 | .escr_emask = | ||
365 | P4_ESCR_EMASK_BIT(P4_EVENT_RESOURCE_STALL, SBFULL), | ||
214 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 366 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
215 | }, | 367 | }, |
216 | [P4_EVENT_WC_BUFFER] = { | 368 | [P4_EVENT_WC_BUFFER] = { |
217 | .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER), | 369 | .opcode = P4_OPCODE(P4_EVENT_WC_BUFFER), |
218 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, | 370 | .escr_msr = { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 }, |
371 | .escr_emask = | ||
372 | P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_EVICTS) | | ||
373 | P4_ESCR_EMASK_BIT(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS), | ||
374 | .shared = 1, | ||
219 | .cntr = { {8, 9, -1}, {10, 11, -1} }, | 375 | .cntr = { {8, 9, -1}, {10, 11, -1} }, |
220 | }, | 376 | }, |
221 | [P4_EVENT_B2B_CYCLES] = { | 377 | [P4_EVENT_B2B_CYCLES] = { |
222 | .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES), | 378 | .opcode = P4_OPCODE(P4_EVENT_B2B_CYCLES), |
223 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 379 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
380 | .escr_emask = 0, | ||
224 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 381 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
225 | }, | 382 | }, |
226 | [P4_EVENT_BNR] = { | 383 | [P4_EVENT_BNR] = { |
227 | .opcode = P4_OPCODE(P4_EVENT_BNR), | 384 | .opcode = P4_OPCODE(P4_EVENT_BNR), |
228 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 385 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
386 | .escr_emask = 0, | ||
229 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 387 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
230 | }, | 388 | }, |
231 | [P4_EVENT_SNOOP] = { | 389 | [P4_EVENT_SNOOP] = { |
232 | .opcode = P4_OPCODE(P4_EVENT_SNOOP), | 390 | .opcode = P4_OPCODE(P4_EVENT_SNOOP), |
233 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 391 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
392 | .escr_emask = 0, | ||
234 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 393 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
235 | }, | 394 | }, |
236 | [P4_EVENT_RESPONSE] = { | 395 | [P4_EVENT_RESPONSE] = { |
237 | .opcode = P4_OPCODE(P4_EVENT_RESPONSE), | 396 | .opcode = P4_OPCODE(P4_EVENT_RESPONSE), |
238 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, | 397 | .escr_msr = { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 }, |
398 | .escr_emask = 0, | ||
239 | .cntr = { {0, -1, -1}, {2, -1, -1} }, | 399 | .cntr = { {0, -1, -1}, {2, -1, -1} }, |
240 | }, | 400 | }, |
241 | [P4_EVENT_FRONT_END_EVENT] = { | 401 | [P4_EVENT_FRONT_END_EVENT] = { |
242 | .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT), | 402 | .opcode = P4_OPCODE(P4_EVENT_FRONT_END_EVENT), |
243 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 403 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
404 | .escr_emask = | ||
405 | P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, NBOGUS) | | ||
406 | P4_ESCR_EMASK_BIT(P4_EVENT_FRONT_END_EVENT, BOGUS), | ||
244 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 407 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
245 | }, | 408 | }, |
246 | [P4_EVENT_EXECUTION_EVENT] = { | 409 | [P4_EVENT_EXECUTION_EVENT] = { |
247 | .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT), | 410 | .opcode = P4_OPCODE(P4_EVENT_EXECUTION_EVENT), |
248 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 411 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
412 | .escr_emask = | ||
413 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0) | | ||
414 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1) | | ||
415 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2) | | ||
416 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3) | | ||
417 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) | | ||
418 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) | | ||
419 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) | | ||
420 | P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3), | ||
249 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 421 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
250 | }, | 422 | }, |
251 | [P4_EVENT_REPLAY_EVENT] = { | 423 | [P4_EVENT_REPLAY_EVENT] = { |
252 | .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT), | 424 | .opcode = P4_OPCODE(P4_EVENT_REPLAY_EVENT), |
253 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 425 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
426 | .escr_emask = | ||
427 | P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, NBOGUS) | | ||
428 | P4_ESCR_EMASK_BIT(P4_EVENT_REPLAY_EVENT, BOGUS), | ||
254 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 429 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
255 | }, | 430 | }, |
256 | [P4_EVENT_INSTR_RETIRED] = { | 431 | [P4_EVENT_INSTR_RETIRED] = { |
257 | .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED), | 432 | .opcode = P4_OPCODE(P4_EVENT_INSTR_RETIRED), |
258 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 433 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
434 | .escr_emask = | ||
435 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG) | | ||
436 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSTAG) | | ||
437 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG) | | ||
438 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSTAG), | ||
259 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 439 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
260 | }, | 440 | }, |
261 | [P4_EVENT_UOPS_RETIRED] = { | 441 | [P4_EVENT_UOPS_RETIRED] = { |
262 | .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED), | 442 | .opcode = P4_OPCODE(P4_EVENT_UOPS_RETIRED), |
263 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 443 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
444 | .escr_emask = | ||
445 | P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, NBOGUS) | | ||
446 | P4_ESCR_EMASK_BIT(P4_EVENT_UOPS_RETIRED, BOGUS), | ||
264 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 447 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
265 | }, | 448 | }, |
266 | [P4_EVENT_UOP_TYPE] = { | 449 | [P4_EVENT_UOP_TYPE] = { |
267 | .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE), | 450 | .opcode = P4_OPCODE(P4_EVENT_UOP_TYPE), |
268 | .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 }, | 451 | .escr_msr = { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 }, |
452 | .escr_emask = | ||
453 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGLOADS) | | ||
454 | P4_ESCR_EMASK_BIT(P4_EVENT_UOP_TYPE, TAGSTORES), | ||
269 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 455 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
270 | }, | 456 | }, |
271 | [P4_EVENT_BRANCH_RETIRED] = { | 457 | [P4_EVENT_BRANCH_RETIRED] = { |
272 | .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED), | 458 | .opcode = P4_OPCODE(P4_EVENT_BRANCH_RETIRED), |
273 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 459 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
460 | .escr_emask = | ||
461 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNP) | | ||
462 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMNM) | | ||
463 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTP) | | ||
464 | P4_ESCR_EMASK_BIT(P4_EVENT_BRANCH_RETIRED, MMTM), | ||
274 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 465 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
275 | }, | 466 | }, |
276 | [P4_EVENT_MISPRED_BRANCH_RETIRED] = { | 467 | [P4_EVENT_MISPRED_BRANCH_RETIRED] = { |
277 | .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), | 468 | .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), |
278 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 469 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
470 | .escr_emask = | ||
471 | P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), | ||
279 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 472 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
280 | }, | 473 | }, |
281 | [P4_EVENT_X87_ASSIST] = { | 474 | [P4_EVENT_X87_ASSIST] = { |
282 | .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST), | 475 | .opcode = P4_OPCODE(P4_EVENT_X87_ASSIST), |
283 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 476 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
477 | .escr_emask = | ||
478 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSU) | | ||
479 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, FPSO) | | ||
480 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAO) | | ||
481 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, POAU) | | ||
482 | P4_ESCR_EMASK_BIT(P4_EVENT_X87_ASSIST, PREA), | ||
284 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 483 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
285 | }, | 484 | }, |
286 | [P4_EVENT_MACHINE_CLEAR] = { | 485 | [P4_EVENT_MACHINE_CLEAR] = { |
287 | .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR), | 486 | .opcode = P4_OPCODE(P4_EVENT_MACHINE_CLEAR), |
288 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, | 487 | .escr_msr = { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 }, |
488 | .escr_emask = | ||
489 | P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, CLEAR) | | ||
490 | P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, MOCLEAR) | | ||
491 | P4_ESCR_EMASK_BIT(P4_EVENT_MACHINE_CLEAR, SMCLEAR), | ||
289 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 492 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
290 | }, | 493 | }, |
291 | [P4_EVENT_INSTR_COMPLETED] = { | 494 | [P4_EVENT_INSTR_COMPLETED] = { |
292 | .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED), | 495 | .opcode = P4_OPCODE(P4_EVENT_INSTR_COMPLETED), |
293 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, | 496 | .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, |
497 | .escr_emask = | ||
498 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, NBOGUS) | | ||
499 | P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_COMPLETED, BOGUS), | ||
294 | .cntr = { {12, 13, 16}, {14, 15, 17} }, | 500 | .cntr = { {12, 13, 16}, {14, 15, 17} }, |
295 | }, | 501 | }, |
296 | }; | 502 | }; |
@@ -428,29 +634,73 @@ static u64 p4_pmu_event_map(int hw_event) | |||
428 | return config; | 634 | return config; |
429 | } | 635 | } |
430 | 636 | ||
637 | /* check cpu model specifics */ | ||
638 | static bool p4_event_match_cpu_model(unsigned int event_idx) | ||
639 | { | ||
640 | /* INSTR_COMPLETED event only exists for models 3, 4 and 6 (Prescott) */ | ||
641 | if (event_idx == P4_EVENT_INSTR_COMPLETED) { | ||
642 | if (boot_cpu_data.x86_model != 3 && | ||
643 | boot_cpu_data.x86_model != 4 && | ||
644 | boot_cpu_data.x86_model != 6) | ||
645 | return false; | ||
646 | } | ||
647 | |||
648 | /* | ||
649 | * For info | ||
650 | * - IQ_ESCR0, IQ_ESCR1 only for models 1 and 2 | ||
651 | */ | ||
652 | |||
653 | return true; | ||
654 | } | ||
655 | |||
431 | static int p4_validate_raw_event(struct perf_event *event) | 656 | static int p4_validate_raw_event(struct perf_event *event) |
432 | { | 657 | { |
433 | unsigned int v; | 658 | unsigned int v, emask; |
434 | 659 | ||
435 | /* user data may have out-of-bound event index */ | 660 | /* User data may have out-of-bound event index */ |
436 | v = p4_config_unpack_event(event->attr.config); | 661 | v = p4_config_unpack_event(event->attr.config); |
437 | if (v >= ARRAY_SIZE(p4_event_bind_map)) { | 662 | if (v >= ARRAY_SIZE(p4_event_bind_map)) |
438 | pr_warning("P4 PMU: Unknown event code: %d\n", v); | 663 | return -EINVAL; |
664 | |||
665 | /* It may be unsupported: */ | ||
666 | if (!p4_event_match_cpu_model(v)) | ||
439 | return -EINVAL; | 667 | return -EINVAL; |
668 | |||
669 | /* | ||
670 | * NOTE: P4_CCCR_THREAD_ANY does not carry the same meaning as | ||
671 | * in Architectural Performance Monitoring: it selects not | ||
672 | * _which_ logical cpu to count on but rather _when_, i.e. it | ||
673 | * depends on logical cpu state -- count the event if one cpu is | ||
674 | * active, none, both or any -- so we simply allow the user to | ||
675 | * pass any value desired. | ||
676 | * | ||
677 | * In turn we always set the Tx_OS/Tx_USR bits bound to the logical | ||
678 | * cpu, without propagating them to the other cpu. | ||
679 | */ | ||
680 | |||
681 | /* | ||
682 | * if an event is shared across the logical threads | ||
683 | * the user needs special permissions to be able to use it | ||
684 | */ | ||
685 | if (p4_event_bind_map[v].shared) { | ||
686 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
687 | return -EACCES; | ||
440 | } | 688 | } |
441 | 689 | ||
690 | /* ESCR EventMask bits may be invalid */ | ||
691 | emask = p4_config_unpack_escr(event->attr.config) & P4_ESCR_EVENTMASK_MASK; | ||
692 | if (emask & ~p4_event_bind_map[v].escr_emask) | ||
693 | return -EINVAL; | ||
694 | |||
442 | /* | 695 | /* |
443 | * it may have some screwed PEBS bits | 696 | * it may have some invalid PEBS bits |
444 | */ | 697 | */ |
445 | if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) { | 698 | if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) |
446 | pr_warning("P4 PMU: PEBS are not supported yet\n"); | ||
447 | return -EINVAL; | 699 | return -EINVAL; |
448 | } | 700 | |
449 | v = p4_config_unpack_metric(event->attr.config); | 701 | v = p4_config_unpack_metric(event->attr.config); |
450 | if (v >= ARRAY_SIZE(p4_pebs_bind_map)) { | 702 | if (v >= ARRAY_SIZE(p4_pebs_bind_map)) |
451 | pr_warning("P4 PMU: Unknown metric code: %d\n", v); | ||
452 | return -EINVAL; | 703 | return -EINVAL; |
453 | } | ||
454 | 704 | ||
455 | return 0; | 705 | return 0; |
456 | } | 706 | } |
@@ -478,27 +728,21 @@ static int p4_hw_config(struct perf_event *event) | |||
478 | 728 | ||
479 | if (event->attr.type == PERF_TYPE_RAW) { | 729 | if (event->attr.type == PERF_TYPE_RAW) { |
480 | 730 | ||
731 | /* | ||
732 | * Clear bits we reserve to be managed by the kernel itself | ||
733 | * and never allow to be set from user space | ||
734 | */ | ||
735 | event->attr.config &= P4_CONFIG_MASK; | ||
736 | |||
481 | rc = p4_validate_raw_event(event); | 737 | rc = p4_validate_raw_event(event); |
482 | if (rc) | 738 | if (rc) |
483 | goto out; | 739 | goto out; |
484 | 740 | ||
485 | /* | 741 | /* |
486 | * We don't control raw events so it's up to the caller | ||
487 | * to pass sane values (and we don't count the thread number | ||
488 | * on HT machine but allow HT-compatible specifics to be | ||
489 | * passed on) | ||
490 | * | ||
491 | * Note that for RAW events we allow the user to use P4_CCCR_RESERVED | 742 | * Note that for RAW events we allow the user to use P4_CCCR_RESERVED |
492 | * bits since we keep additional info here (for cache events etc.) | 743 | * bits since we keep additional info here (for cache events etc.) |
493 | * | ||
494 | * XXX: HT wide things should check perf_paranoid_cpu() && | ||
495 | * CAP_SYS_ADMIN | ||
496 | */ | 744 | */ |
497 | event->hw.config |= event->attr.config & | 745 | event->hw.config |= event->attr.config; |
498 | (p4_config_pack_escr(P4_ESCR_MASK_HT) | | ||
499 | p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED)); | ||
500 | |||
501 | event->hw.config &= ~P4_CCCR_FORCE_OVF; | ||
502 | } | 746 | } |
503 | 747 | ||
504 | rc = x86_setup_perfctr(event); | 748 | rc = x86_setup_perfctr(event); |
@@ -660,8 +904,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) | |||
660 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | 904 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
661 | int overflow; | 905 | int overflow; |
662 | 906 | ||
663 | if (!test_bit(idx, cpuc->active_mask)) | 907 | if (!test_bit(idx, cpuc->active_mask)) { |
908 | /* catch in-flight IRQs */ | ||
909 | if (__test_and_clear_bit(idx, cpuc->running)) | ||
910 | handled++; | ||
664 | continue; | 911 | continue; |
912 | } | ||
665 | 913 | ||
666 | event = cpuc->events[idx]; | 914 | event = cpuc->events[idx]; |
667 | hwc = &event->hw; | 915 | hwc = &event->hw; |
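
The core of the new p4_validate_raw_event() above is a per-event whitelist: the ESCR EventMask bits extracted from the raw config must be a subset of the event's escr_emask, events marked .shared additionally require CAP_SYS_ADMIN under perf_paranoid_cpu(), and the bits the kernel reserves for itself are stripped beforehand by the P4_CONFIG_MASK step in p4_hw_config(). A standalone demonstration of the subset test (bit positions invented for the demo; the real ones come from P4_ESCR_EMASK_BIT()):

#include <stdio.h>

#define DEMO_BIT(n)	(1u << (n))

int main(void)
{
	/* pretend this event architecturally defines only mask bits 0..2 */
	unsigned int escr_emask = DEMO_BIT(0) | DEMO_BIT(1) | DEMO_BIT(2);

	unsigned int ok_request  = DEMO_BIT(0) | DEMO_BIT(2);	/* subset: valid   */
	unsigned int bad_request = DEMO_BIT(1) | DEMO_BIT(5);	/* bit 5: rejected */

	printf("ok_request  -> %s\n", (ok_request  & ~escr_emask) ? "-EINVAL" : "accepted");
	printf("bad_request -> %s\n", (bad_request & ~escr_emask) ? "-EINVAL" : "accepted");
	return 0;
}
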
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 34b4dad6f0b8..d49079515122 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
31 | const struct cpuid_bit *cb; | 31 | const struct cpuid_bit *cb; |
32 | 32 | ||
33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { |
34 | { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 }, | ||
34 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, | 35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, |
35 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
36 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index cd37469b54ee..3afb33f14d2d 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -257,14 +257,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code) | |||
257 | return mod_code_status; | 257 | return mod_code_status; |
258 | } | 258 | } |
259 | 259 | ||
260 | |||
261 | |||
262 | |||
263 | static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; | ||
264 | |||
265 | static unsigned char *ftrace_nop_replace(void) | 260 | static unsigned char *ftrace_nop_replace(void) |
266 | { | 261 | { |
267 | return ftrace_nop; | 262 | return ideal_nop5; |
268 | } | 263 | } |
269 | 264 | ||
270 | static int | 265 | static int |
@@ -338,62 +333,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) | |||
338 | 333 | ||
339 | int __init ftrace_dyn_arch_init(void *data) | 334 | int __init ftrace_dyn_arch_init(void *data) |
340 | { | 335 | { |
341 | extern const unsigned char ftrace_test_p6nop[]; | ||
342 | extern const unsigned char ftrace_test_nop5[]; | ||
343 | extern const unsigned char ftrace_test_jmp[]; | ||
344 | int faulted = 0; | ||
345 | |||
346 | /* | ||
347 | * There is no good nop for all x86 archs. | ||
348 | * We will default to using the P6_NOP5, but first we | ||
349 | * will test to make sure that the nop will actually | ||
350 | * work on this CPU. If it faults, we will then | ||
351 | * go to a lesser efficient 5 byte nop. If that fails | ||
352 | * we then just use a jmp as our nop. This isn't the most | ||
353 | * efficient nop, but we can not use a multi part nop | ||
354 | * since we would then risk being preempted in the middle | ||
355 | * of that nop, and if we enabled tracing then, it might | ||
356 | * cause a system crash. | ||
357 | * | ||
358 | * TODO: check the cpuid to determine the best nop. | ||
359 | */ | ||
360 | asm volatile ( | ||
361 | "ftrace_test_jmp:" | ||
362 | "jmp ftrace_test_p6nop\n" | ||
363 | "nop\n" | ||
364 | "nop\n" | ||
365 | "nop\n" /* 2 byte jmp + 3 bytes */ | ||
366 | "ftrace_test_p6nop:" | ||
367 | P6_NOP5 | ||
368 | "jmp 1f\n" | ||
369 | "ftrace_test_nop5:" | ||
370 | ".byte 0x66,0x66,0x66,0x66,0x90\n" | ||
371 | "1:" | ||
372 | ".section .fixup, \"ax\"\n" | ||
373 | "2: movl $1, %0\n" | ||
374 | " jmp ftrace_test_nop5\n" | ||
375 | "3: movl $2, %0\n" | ||
376 | " jmp 1b\n" | ||
377 | ".previous\n" | ||
378 | _ASM_EXTABLE(ftrace_test_p6nop, 2b) | ||
379 | _ASM_EXTABLE(ftrace_test_nop5, 3b) | ||
380 | : "=r"(faulted) : "0" (faulted)); | ||
381 | |||
382 | switch (faulted) { | ||
383 | case 0: | ||
384 | pr_info("converting mcount calls to 0f 1f 44 00 00\n"); | ||
385 | memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); | ||
386 | break; | ||
387 | case 1: | ||
388 | pr_info("converting mcount calls to 66 66 66 66 90\n"); | ||
389 | memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); | ||
390 | break; | ||
391 | case 2: | ||
392 | pr_info("converting mcount calls to jmp . + 5\n"); | ||
393 | memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); | ||
394 | break; | ||
395 | } | ||
396 | |||
397 | /* The return code is returned via data */ | 336 | /* The return code is returned via data */ |
398 | *(unsigned long *)data = 0; | 337 | *(unsigned long *)data = 0; |
399 | 338 | ||
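
The deleted ftrace_dyn_arch_init() probe picked one of three 5-byte fillers at boot by executing them and catching faults; that choice now lives in ideal_nop5, initialised once by arch_init_ideal_nop5() (see the setup.c hunk further down), so ftrace_nop_replace() simply returns it. For reference, the candidate sequences as a small standalone program (byte values reproduced from the removed code and its pr_info strings; the short-jmp encoding is inferred from the "2 byte jmp + 3 bytes" comment, so treat it as illustrative):

#include <stdio.h>

static const unsigned char p6_nop5[5]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* P6_NOP5                      */
static const unsigned char osp_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };	/* prefixed single nop          */
static const unsigned char jmp_nop5[5] = { 0xeb, 0x03, 0x90, 0x90, 0x90 };	/* short jmp over 3 nops (.+5)  */

int main(void)
{
	const unsigned char *nops[]  = { p6_nop5, osp_nop5, jmp_nop5 };
	const char          *names[] = { "p6_nop5", "osp_nop5", "jmp_nop5" };

	for (int i = 0; i < 3; i++) {
		printf("%-8s:", names[i]);
		for (int j = 0; j < 5; j++)
			printf(" %02x", nops[i][j]);
		printf("\n");
	}
	return 0;
}
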
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 410fdb3f1939..7494999141b3 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) | |||
506 | { | 506 | { |
507 | unsigned int irq; | 507 | unsigned int irq; |
508 | 508 | ||
509 | irq = create_irq(); | 509 | irq = create_irq_nr(0, -1); |
510 | if (!irq) | 510 | if (!irq) |
511 | return -EINVAL; | 511 | return -EINVAL; |
512 | 512 | ||
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c new file mode 100644 index 000000000000..961b6b30ba90 --- /dev/null +++ b/arch/x86/kernel/jump_label.c | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * jump label x86 support | ||
3 | * | ||
4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> | ||
5 | * | ||
6 | */ | ||
7 | #include <linux/jump_label.h> | ||
8 | #include <linux/memory.h> | ||
9 | #include <linux/uaccess.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/jhash.h> | ||
13 | #include <linux/cpu.h> | ||
14 | #include <asm/kprobes.h> | ||
15 | #include <asm/alternative.h> | ||
16 | |||
17 | #ifdef HAVE_JUMP_LABEL | ||
18 | |||
19 | union jump_code_union { | ||
20 | char code[JUMP_LABEL_NOP_SIZE]; | ||
21 | struct { | ||
22 | char jump; | ||
23 | int offset; | ||
24 | } __attribute__((packed)); | ||
25 | }; | ||
26 | |||
27 | void arch_jump_label_transform(struct jump_entry *entry, | ||
28 | enum jump_label_type type) | ||
29 | { | ||
30 | union jump_code_union code; | ||
31 | |||
32 | if (type == JUMP_LABEL_ENABLE) { | ||
33 | code.jump = 0xe9; | ||
34 | code.offset = entry->target - | ||
35 | (entry->code + JUMP_LABEL_NOP_SIZE); | ||
36 | } else | ||
37 | memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE); | ||
38 | get_online_cpus(); | ||
39 | mutex_lock(&text_mutex); | ||
40 | text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); | ||
41 | mutex_unlock(&text_mutex); | ||
42 | put_online_cpus(); | ||
43 | } | ||
44 | |||
45 | void arch_jump_label_text_poke_early(jump_label_t addr) | ||
46 | { | ||
47 | text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE); | ||
48 | } | ||
49 | |||
50 | #endif | ||
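
arch_jump_label_transform() above rewrites a 5-byte site in place: for JUMP_LABEL_ENABLE it builds "e9 rel32", a near jump whose displacement is measured from the end of the instruction, and otherwise it copies in ideal_nop5; the patching itself goes through text_poke_smp() under text_mutex with CPU hotplug held off. A standalone check of the displacement arithmetic (addresses made up for the example):

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define JUMP_LABEL_NOP_SIZE 5	/* same size the jump_code_union packs */

int main(void)
{
	uint64_t code   = 0x400000;	/* assumed address of the patch site  */
	uint64_t target = 0x400abc;	/* assumed address of the jump target */
	unsigned char insn[JUMP_LABEL_NOP_SIZE];
	int32_t offset = (int32_t)(target - (code + JUMP_LABEL_NOP_SIZE));

	insn[0] = 0xe9;			/* opcode: jmp rel32 */
	memcpy(&insn[1], &offset, sizeof(offset));

	/* the CPU resumes at code + 5 + offset, i.e. exactly at target */
	assert(code + JUMP_LABEL_NOP_SIZE + offset == target);

	printf("jmp rel32: e9 %02x %02x %02x %02x (offset %d)\n",
	       insn[1], insn[2], insn[3], insn[4], offset);
	return 0;
}
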
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 770ebfb349e9..1cbd54c0df99 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c | |||
@@ -230,9 +230,6 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) | |||
230 | return 0; | 230 | return 0; |
231 | } | 231 | } |
232 | 232 | ||
233 | /* Dummy buffers for kallsyms_lookup */ | ||
234 | static char __dummy_buf[KSYM_NAME_LEN]; | ||
235 | |||
236 | /* Check if paddr is at an instruction boundary */ | 233 | /* Check if paddr is at an instruction boundary */ |
237 | static int __kprobes can_probe(unsigned long paddr) | 234 | static int __kprobes can_probe(unsigned long paddr) |
238 | { | 235 | { |
@@ -241,7 +238,7 @@ static int __kprobes can_probe(unsigned long paddr) | |||
241 | struct insn insn; | 238 | struct insn insn; |
242 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 239 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
243 | 240 | ||
244 | if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf)) | 241 | if (!kallsyms_lookup_size_offset(paddr, NULL, &offset)) |
245 | return 0; | 242 | return 0; |
246 | 243 | ||
247 | /* Decode instructions */ | 244 | /* Decode instructions */ |
@@ -1129,7 +1126,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, | |||
1129 | *(unsigned long *)addr = val; | 1126 | *(unsigned long *)addr = val; |
1130 | } | 1127 | } |
1131 | 1128 | ||
1132 | void __kprobes kprobes_optinsn_template_holder(void) | 1129 | static void __used __kprobes kprobes_optinsn_template_holder(void) |
1133 | { | 1130 | { |
1134 | asm volatile ( | 1131 | asm volatile ( |
1135 | ".global optprobe_template_entry\n" | 1132 | ".global optprobe_template_entry\n" |
@@ -1221,7 +1218,8 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) | |||
1221 | } | 1218 | } |
1222 | /* Check whether the address range is reserved */ | 1219 | /* Check whether the address range is reserved */ |
1223 | if (ftrace_text_reserved(src, src + len - 1) || | 1220 | if (ftrace_text_reserved(src, src + len - 1) || |
1224 | alternatives_text_reserved(src, src + len - 1)) | 1221 | alternatives_text_reserved(src, src + len - 1) || |
1222 | jump_label_text_reserved(src, src + len - 1)) | ||
1225 | return -EBUSY; | 1223 | return -EBUSY; |
1226 | 1224 | ||
1227 | return len; | 1225 | return len; |
@@ -1269,11 +1267,9 @@ static int __kprobes can_optimize(unsigned long paddr) | |||
1269 | unsigned long addr, size = 0, offset = 0; | 1267 | unsigned long addr, size = 0, offset = 0; |
1270 | struct insn insn; | 1268 | struct insn insn; |
1271 | kprobe_opcode_t buf[MAX_INSN_SIZE]; | 1269 | kprobe_opcode_t buf[MAX_INSN_SIZE]; |
1272 | /* Dummy buffers for lookup_symbol_attrs */ | ||
1273 | static char __dummy_buf[KSYM_NAME_LEN]; | ||
1274 | 1270 | ||
1275 | /* Lookup symbol including addr */ | 1271 | /* Lookup symbol including addr */ |
1276 | if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf)) | 1272 | if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) |
1277 | return 0; | 1273 | return 0; |
1278 | 1274 | ||
1279 | /* Check there is enough space for a relative jump. */ | 1275 | /* Check there is enough space for a relative jump. */ |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index e0bc186d7501..8f2956091735 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -239,11 +239,13 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
239 | apply_paravirt(pseg, pseg + para->sh_size); | 239 | apply_paravirt(pseg, pseg + para->sh_size); |
240 | } | 240 | } |
241 | 241 | ||
242 | return module_bug_finalize(hdr, sechdrs, me); | 242 | /* make jump label nops */ |
243 | jump_label_apply_nops(me); | ||
244 | |||
245 | return 0; | ||
243 | } | 246 | } |
244 | 247 | ||
245 | void module_arch_cleanup(struct module *mod) | 248 | void module_arch_cleanup(struct module *mod) |
246 | { | 249 | { |
247 | alternatives_smp_module_del(mod); | 250 | alternatives_smp_module_del(mod); |
248 | module_bug_cleanup(mod); | ||
249 | } | 251 | } |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c3a4fbb2b996..00e167870f71 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -112,6 +112,7 @@ | |||
112 | #include <asm/numa_64.h> | 112 | #include <asm/numa_64.h> |
113 | #endif | 113 | #endif |
114 | #include <asm/mce.h> | 114 | #include <asm/mce.h> |
115 | #include <asm/alternative.h> | ||
115 | 116 | ||
116 | /* | 117 | /* |
117 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. | 118 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. |
@@ -726,6 +727,7 @@ void __init setup_arch(char **cmdline_p) | |||
726 | { | 727 | { |
727 | int acpi = 0; | 728 | int acpi = 0; |
728 | int k8 = 0; | 729 | int k8 = 0; |
730 | unsigned long flags; | ||
729 | 731 | ||
730 | #ifdef CONFIG_X86_32 | 732 | #ifdef CONFIG_X86_32 |
731 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); | 733 | memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); |
@@ -1071,6 +1073,10 @@ void __init setup_arch(char **cmdline_p) | |||
1071 | x86_init.oem.banner(); | 1073 | x86_init.oem.banner(); |
1072 | 1074 | ||
1073 | mcheck_init(); | 1075 | mcheck_init(); |
1076 | |||
1077 | local_irq_save(flags); | ||
1078 | arch_init_ideal_nop5(); | ||
1079 | local_irq_restore(flags); | ||
1074 | } | 1080 | } |
1075 | 1081 | ||
1076 | #ifdef CONFIG_X86_32 | 1082 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 4c4508e8a204..a24c6cfdccc4 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -251,6 +251,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) | |||
251 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | 251 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) |
252 | return -1; | 252 | return -1; |
253 | 253 | ||
254 | WARN_ON_ONCE(in_nmi()); | ||
255 | |||
254 | /* | 256 | /* |
255 | * Synchronize this task's top level page-table | 257 | * Synchronize this task's top level page-table |
256 | * with the 'reference' page table. | 258 | * with the 'reference' page table. |
@@ -369,6 +371,8 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) | |||
369 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | 371 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) |
370 | return -1; | 372 | return -1; |
371 | 373 | ||
374 | WARN_ON_ONCE(in_nmi()); | ||
375 | |||
372 | /* | 376 | /* |
373 | * Copy kernel mappings over when needed. This can also | 377 | * Copy kernel mappings over when needed. This can also |
374 | * happen within a race in page table update. In the later | 378 | * happen within a race in page table update. In the later |
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index b3b531a4f8e5..d87dd6d042d6 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c | |||
@@ -631,6 +631,8 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, | |||
631 | if (!pte) | 631 | if (!pte) |
632 | return false; | 632 | return false; |
633 | 633 | ||
634 | WARN_ON_ONCE(in_nmi()); | ||
635 | |||
634 | if (error_code & 2) | 636 | if (error_code & 2) |
635 | kmemcheck_access(regs, address, KMEMCHECK_WRITE); | 637 | kmemcheck_access(regs, address, KMEMCHECK_WRITE); |
636 | else | 638 | else |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 1a5353a753fc..b2bb5aa3b054 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -489,8 +489,9 @@ static void xen_hvm_setup_cpu_clockevents(void) | |||
489 | __init void xen_hvm_init_time_ops(void) | 489 | __init void xen_hvm_init_time_ops(void) |
490 | { | 490 | { |
491 | /* vector callback is needed otherwise we cannot receive interrupts | 491 | /* vector callback is needed otherwise we cannot receive interrupts |
492 | * on cpu > 0 */ | 492 | * on cpu > 0 and at this point we don't know how many cpus are |
493 | if (!xen_have_vector_callback && num_present_cpus() > 1) | 493 | * available */ |
494 | if (!xen_have_vector_callback) | ||
494 | return; | 495 | return; |
495 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { | 496 | if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { |
496 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," | 497 | printk(KERN_INFO "Xen doesn't support pvclock on HVM," |