path: root/arch/x86/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>    2010-03-18 19:52:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-03-18 19:52:46 -0400
commit    f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240 (patch)
tree      09fc553c2fb6f527962048d139159dc139e04afc /arch/x86/kernel
parent    c6b9e73f2fee8bb86058f296de808b326473456b (diff)
parent    dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
  perf: Fix unexported generic perf_arch_fetch_caller_regs
  perf record: Don't try to find buildids in a zero sized file
  perf: export perf_trace_regs and perf_arch_fetch_caller_regs
  perf, x86: Fix hw_perf_enable() event assignment
  perf, ppc: Fix compile error due to new cpu notifiers
  perf: Make the install relative to DESTDIR if specified
  kprobes: Calculate the index correctly when freeing the out-of-line execution slot
  perf tools: Fix sparse CPU numbering related bugs
  perf_event: Fix oops triggered by cpu offline/online
  perf: Drop the obsolete profile naming for trace events
  perf: Take a hot regs snapshot for trace events
  perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot
  perf/x86-64: Use frame pointer to walk on irq and process stacks
  lockdep: Move lock events under lockdep recursion protection
  perf report: Print the map table just after samples for which no map was found
  perf report: Add multiple event support
  perf session: Change perf_session post processing functions to take histogram tree
  perf session: Add storage for seperating event types in report
  perf session: Change add_hist_entry to take the tree root instead of session
  perf record: Add ID and to recorded event data when recording multiple events
  ...
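The x86 part of this merge reworks the PMU callback API: x86_pmu.enable()/.disable() and the x86_perf_event_update()/x86_perf_event_set_period() helpers now take the struct perf_event itself instead of a (struct hw_perf_event *, int idx) pair, since the counter index already lives in event->hw.idx. The sketch below is illustrative only (trimmed stand-in types, not kernel code); it shows the shape of that signature change as it appears in the perf_event.c hunks further down.

#include <stdio.h>

/* Trimmed stand-ins for the kernel types touched below (illustrative only). */
struct hw_perf_event { int idx; unsigned long long config; };
struct perf_event    { struct hw_perf_event hw; };

/* Old style: callbacks needed the hw event plus the counter index. */
static void old_enable(struct hw_perf_event *hwc, int idx)
{
	printf("program counter %d with config %#llx\n", idx, hwc->config);
}

/* New style: the event is enough; the index is read from event->hw.idx. */
static void new_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	printf("program counter %d with config %#llx\n", hwc->idx, hwc->config);
}

int main(void)
{
	struct perf_event event = { .hw = { .idx = 2, .config = 0xc0 } };

	old_enable(&event.hw, event.hw.idx);	/* index passed redundantly */
	new_enable(&event);			/* index carried by the event */
	return 0;
}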
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c       | 183
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c   |  62
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c |  57
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c    |  10
-rw-r--r--  arch/x86/kernel/dumpstack.h            |  15
-rw-r--r--  arch/x86/kernel/dumpstack_64.c         |   4
6 files changed, 176 insertions(+), 155 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 42aafd11e170..60398a0d947c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -133,8 +133,8 @@ struct x86_pmu {
 	int (*handle_irq)(struct pt_regs *);
 	void (*disable_all)(void);
 	void (*enable_all)(void);
-	void (*enable)(struct hw_perf_event *, int);
-	void (*disable)(struct hw_perf_event *, int);
+	void (*enable)(struct perf_event *);
+	void (*disable)(struct perf_event *);
 	unsigned eventsel;
 	unsigned perfctr;
 	u64 (*event_map)(int);
@@ -157,6 +157,11 @@ struct x86_pmu {
 	void (*put_event_constraints)(struct cpu_hw_events *cpuc,
 				      struct perf_event *event);
 	struct event_constraint *event_constraints;
+
+	void (*cpu_prepare)(int cpu);
+	void (*cpu_starting)(int cpu);
+	void (*cpu_dying)(int cpu);
+	void (*cpu_dead)(int cpu);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -165,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 	.enabled = 1,
 };
 
-static int x86_perf_event_set_period(struct perf_event *event,
-				     struct hw_perf_event *hwc, int idx);
+static int x86_perf_event_set_period(struct perf_event *event);
 
 /*
  * Generalized hw caching related hw_event table, filled
@@ -189,11 +193,12 @@ static u64 __read_mostly hw_cache_event_ids
  * Returns the delta events processed.
  */
 static u64
-x86_perf_event_update(struct perf_event *event,
-		      struct hw_perf_event *hwc, int idx)
+x86_perf_event_update(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	int shift = 64 - x86_pmu.event_bits;
 	u64 prev_raw_count, new_raw_count;
+	int idx = hwc->idx;
 	s64 delta;
 
 	if (idx == X86_PMC_IDX_FIXED_BTS)
@@ -293,7 +298,7 @@ static inline bool bts_available(void)
 	return x86_pmu.enable_bts != NULL;
 }
 
-static inline void init_debug_store_on_cpu(int cpu)
+static void init_debug_store_on_cpu(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
 
@@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu)
 		     (u32)((u64)(unsigned long)ds >> 32));
 }
 
-static inline void fini_debug_store_on_cpu(int cpu)
+static void fini_debug_store_on_cpu(int cpu)
 {
 	if (!per_cpu(cpu_hw_events, cpu).ds)
 		return;
@@ -638,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (test_bit(hwc->idx, used_mask))
 			break;
 
-		set_bit(hwc->idx, used_mask);
+		__set_bit(hwc->idx, used_mask);
 		if (assign)
 			assign[i] = hwc->idx;
 	}
@@ -687,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		if (j == X86_PMC_IDX_MAX)
 			break;
 
-		set_bit(j, used_mask);
+		__set_bit(j, used_mask);
 
 		if (assign)
 			assign[i] = j;
@@ -780,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
 		hwc->last_tag == cpuc->tags[i];
 }
 
+static int x86_pmu_start(struct perf_event *event);
 static void x86_pmu_stop(struct perf_event *event);
 
 void hw_perf_enable(void)
@@ -796,6 +802,7 @@ void hw_perf_enable(void)
 		return;
 
 	if (cpuc->n_added) {
+		int n_running = cpuc->n_events - cpuc->n_added;
 		/*
 		 * apply assignment obtained either from
 		 * hw_perf_group_sched_in() or x86_pmu_enable()
@@ -803,8 +810,7 @@ void hw_perf_enable(void)
 		 * step1: save events moving to new counters
 		 * step2: reprogram moved events into new counters
 		 */
-		for (i = 0; i < cpuc->n_events; i++) {
-
+		for (i = 0; i < n_running; i++) {
 			event = cpuc->event_list[i];
 			hwc = &event->hw;
 
@@ -819,29 +825,18 @@ void hw_perf_enable(void)
 				continue;
 
 			x86_pmu_stop(event);
-
-			hwc->idx = -1;
 		}
 
 		for (i = 0; i < cpuc->n_events; i++) {
-
 			event = cpuc->event_list[i];
 			hwc = &event->hw;
 
-			if (hwc->idx == -1) {
-				x86_assign_hw_event(event, cpuc, i);
-				x86_perf_event_set_period(event, hwc, hwc->idx);
-			}
-			/*
-			 * need to mark as active because x86_pmu_disable()
-			 * clear active_mask and events[] yet it preserves
-			 * idx
-			 */
-			set_bit(hwc->idx, cpuc->active_mask);
-			cpuc->events[hwc->idx] = event;
+			if (!match_prev_assignment(hwc, cpuc, i))
+				x86_assign_hw_event(event, cpuc, i);
+			else if (i < n_running)
+				continue;
 
-			x86_pmu.enable(hwc, hwc->idx);
-			perf_event_update_userpage(event);
+			x86_pmu_start(event);
 		}
 		cpuc->n_added = 0;
 		perf_events_lapic_init();
@@ -853,15 +848,16 @@ void hw_perf_enable(void)
 	x86_pmu.enable_all();
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-	(void)checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static inline void x86_pmu_disable_event(struct perf_event *event)
 {
-	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
+	struct hw_perf_event *hwc = &event->hw;
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -871,12 +867,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
  * To be called with the event disabled in hw:
  */
 static int
-x86_perf_event_set_period(struct perf_event *event,
-			  struct hw_perf_event *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
 	s64 left = atomic64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
-	int err, ret = 0;
+	int err, ret = 0, idx = hwc->idx;
 
 	if (idx == X86_PMC_IDX_FIXED_BTS)
 		return 0;
@@ -922,11 +918,11 @@ x86_perf_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	if (cpuc->enabled)
-		__x86_pmu_enable_event(hwc, idx);
+		__x86_pmu_enable_event(&event->hw);
 }
 
 /*
@@ -962,34 +958,32 @@ static int x86_pmu_enable(struct perf_event *event)
 	memcpy(cpuc->assign, assign, n*sizeof(int));
 
 	cpuc->n_events = n;
-	cpuc->n_added = n - n0;
+	cpuc->n_added += n - n0;
 
 	return 0;
 }
 
 static int x86_pmu_start(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx = event->hw.idx;
 
-	if (hwc->idx == -1)
+	if (idx == -1)
 		return -EAGAIN;
 
-	x86_perf_event_set_period(event, hwc, hwc->idx);
-	x86_pmu.enable(hwc, hwc->idx);
+	x86_perf_event_set_period(event);
+	cpuc->events[idx] = event;
+	__set_bit(idx, cpuc->active_mask);
+	x86_pmu.enable(event);
+	perf_event_update_userpage(event);
 
 	return 0;
 }
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-			 cpuc->events[hwc->idx] != event))
-		return;
-
-	x86_pmu.enable(hwc, hwc->idx);
+	int ret = x86_pmu_start(event);
+	WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1049,18 +1043,16 @@ static void x86_pmu_stop(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	/*
-	 * Must be done before we disable, otherwise the nmi handler
-	 * could reenable again:
-	 */
-	clear_bit(idx, cpuc->active_mask);
-	x86_pmu.disable(hwc, idx);
+	if (!__test_and_clear_bit(idx, cpuc->active_mask))
+		return;
+
+	x86_pmu.disable(event);
 
 	/*
 	 * Drain the remaining delta count out of a event
 	 * that we are disabling:
 	 */
-	x86_perf_event_update(event, hwc, idx);
+	x86_perf_event_update(event);
 
 	cpuc->events[idx] = NULL;
 }
@@ -1108,7 +1100,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		event = cpuc->events[idx];
 		hwc = &event->hw;
 
-		val = x86_perf_event_update(event, hwc, idx);
+		val = x86_perf_event_update(event);
 		if (val & (1ULL << (x86_pmu.event_bits - 1)))
 			continue;
 
@@ -1118,11 +1110,11 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		handled = 1;
 		data.period = event->hw.last_period;
 
-		if (!x86_perf_event_set_period(event, hwc, idx))
+		if (!x86_perf_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			x86_pmu.disable(hwc, idx);
+			x86_pmu_stop(event);
 	}
 
 	if (handled)
@@ -1309,7 +1301,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	memcpy(cpuc->assign, assign, n0*sizeof(int));
 
 	cpuc->n_events = n0;
-	cpuc->n_added = n1;
+	cpuc->n_added += n1;
 	ctx->nr_active += n1;
 
 	/*
@@ -1337,6 +1329,39 @@ undo:
 #include "perf_event_p6.c"
 #include "perf_event_intel.c"
 
+static int __cpuinit
+x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_UP_PREPARE:
+		if (x86_pmu.cpu_prepare)
+			x86_pmu.cpu_prepare(cpu);
+		break;
+
+	case CPU_STARTING:
+		if (x86_pmu.cpu_starting)
+			x86_pmu.cpu_starting(cpu);
+		break;
+
+	case CPU_DYING:
+		if (x86_pmu.cpu_dying)
+			x86_pmu.cpu_dying(cpu);
+		break;
+
+	case CPU_DEAD:
+		if (x86_pmu.cpu_dead)
+			x86_pmu.cpu_dead(cpu);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static void __init pmu_check_apic(void)
 {
 	if (cpu_has_apic)
@@ -1415,11 +1440,13 @@ void __init init_hw_perf_events(void)
 	pr_info("... max period: %016Lx\n", x86_pmu.max_period);
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
 	pr_info("... event mask: %016Lx\n", perf_event_mask);
+
+	perf_cpu_notifier(x86_pmu_notifier);
 }
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
-	x86_perf_event_update(event, &event->hw, event->hw.idx);
+	x86_perf_event_update(event);
 }
 
 static const struct pmu pmu = {
@@ -1675,28 +1702,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	return entry;
 }
 
-void hw_perf_event_setup_online(int cpu)
+#ifdef CONFIG_EVENT_TRACING
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
-	init_debug_store_on_cpu(cpu);
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		amd_pmu_cpu_online(cpu);
-		break;
-	default:
-		return;
-	}
-}
-
-void hw_perf_event_setup_offline(int cpu)
-{
-	init_debug_store_on_cpu(cpu);
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		amd_pmu_cpu_offline(cpu);
-		break;
-	default:
-		return;
-	}
+	regs->ip = ip;
+	/*
+	 * perf_arch_fetch_caller_regs adds another call, we need to increment
+	 * the skip level
+	 */
+	regs->bp = rewind_frame_pointer(skip + 1);
+	regs->cs = __KERNEL_CS;
+	local_save_flags(regs->flags);
 }
+#endif
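The x86_pmu_notifier() added above replaces the old hw_perf_event_setup_online()/offline() hooks: one generic CPU-hotplug notifier dispatches to optional per-vendor callbacks (cpu_prepare, cpu_starting, cpu_dying, cpu_dead) and only calls the ones a PMU driver actually filled in. A minimal user-space model of that optional-callback dispatch (names are illustrative, not the kernel notifier API):

#include <stdio.h>

enum cpu_action { CPU_UP_PREPARE, CPU_STARTING, CPU_DYING, CPU_DEAD };

struct pmu_ops {
	void (*cpu_prepare)(int cpu);	/* any of these may be left NULL */
	void (*cpu_starting)(int cpu);
	void (*cpu_dying)(int cpu);
	void (*cpu_dead)(int cpu);
};

/* Dispatch one hotplug action to whichever hook the driver provided. */
static void notify(const struct pmu_ops *ops, enum cpu_action action, int cpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
		if (ops->cpu_prepare)
			ops->cpu_prepare(cpu);
		break;
	case CPU_STARTING:
		if (ops->cpu_starting)
			ops->cpu_starting(cpu);
		break;
	case CPU_DYING:
		if (ops->cpu_dying)
			ops->cpu_dying(cpu);
		break;
	case CPU_DEAD:
		if (ops->cpu_dead)
			ops->cpu_dead(cpu);
		break;
	}
}

static void say_prepare(int cpu) { printf("prepare cpu %d\n", cpu); }

int main(void)
{
	/* A driver only fills in the hooks it cares about. */
	struct pmu_ops ops = { .cpu_prepare = say_prepare };

	notify(&ops, CPU_UP_PREPARE, 1);	/* runs say_prepare(1) */
	notify(&ops, CPU_DEAD, 1);		/* no hook set: silently skipped */
	return 0;
}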
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4f..573458f1caf2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -271,28 +271,6 @@ done:
 	return &emptyconstraint;
 }
 
-static __initconst struct x86_pmu amd_pmu = {
-	.name = "AMD",
-	.handle_irq = x86_pmu_handle_irq,
-	.disable_all = x86_pmu_disable_all,
-	.enable_all = x86_pmu_enable_all,
-	.enable = x86_pmu_enable_event,
-	.disable = x86_pmu_disable_event,
-	.eventsel = MSR_K7_EVNTSEL0,
-	.perfctr = MSR_K7_PERFCTR0,
-	.event_map = amd_pmu_event_map,
-	.raw_event = amd_pmu_raw_event,
-	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
-	.num_events = 4,
-	.event_bits = 48,
-	.event_mask = (1ULL << 48) - 1,
-	.apic = 1,
-	/* use highest bit to detect overflow */
-	.max_period = (1ULL << 47) - 1,
-	.get_event_constraints = amd_get_event_constraints,
-	.put_event_constraints = amd_put_event_constraints
-};
-
 static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 {
 	struct amd_nb *nb;
@@ -309,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	 * initialize all possible NB constraints
 	 */
 	for (i = 0; i < x86_pmu.num_events; i++) {
-		set_bit(i, nb->event_constraints[i].idxmsk);
+		__set_bit(i, nb->event_constraints[i].idxmsk);
 		nb->event_constraints[i].weight = 1;
 	}
 	return nb;
@@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_unlock(&amd_nb_lock);
 }
 
+static __initconst struct x86_pmu amd_pmu = {
+	.name = "AMD",
+	.handle_irq = x86_pmu_handle_irq,
+	.disable_all = x86_pmu_disable_all,
+	.enable_all = x86_pmu_enable_all,
+	.enable = x86_pmu_enable_event,
+	.disable = x86_pmu_disable_event,
+	.eventsel = MSR_K7_EVNTSEL0,
+	.perfctr = MSR_K7_PERFCTR0,
+	.event_map = amd_pmu_event_map,
+	.raw_event = amd_pmu_raw_event,
+	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
+	.num_events = 4,
+	.event_bits = 48,
+	.event_mask = (1ULL << 48) - 1,
+	.apic = 1,
+	/* use highest bit to detect overflow */
+	.max_period = (1ULL << 47) - 1,
+	.get_event_constraints = amd_get_event_constraints,
+	.put_event_constraints = amd_put_event_constraints,
+
+	.cpu_prepare = amd_pmu_cpu_online,
+	.cpu_dead = amd_pmu_cpu_offline,
+};
+
 static __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
@@ -390,11 +393,6 @@ static __init int amd_pmu_init(void)
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
 	       sizeof(hw_cache_event_ids));
 
-	/*
-	 * explicitly initialize the boot cpu, other cpus will get
-	 * the cpu hotplug callbacks from smp_init()
-	 */
-	amd_pmu_cpu_online(smp_processor_id());
 	return 0;
 }
 
@@ -405,12 +403,4 @@ static int amd_pmu_init(void)
 	return 0;
 }
 
-static void amd_pmu_cpu_online(int cpu)
-{
-}
-
-static void amd_pmu_cpu_offline(int cpu)
-{
-}
-
 #endif
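Several hunks in this merge (x86_schedule_events() earlier and amd_alloc_nb() above) also switch from set_bit() to __set_bit(): the bitmap being written is private to the caller, so the locked read-modify-write of the atomic variant buys nothing. A rough C11 user-space analogue of that distinction (illustrative only, not the kernel bitops API):

#include <stdatomic.h>
#include <stdio.h>

/* Like set_bit(): an atomic read-modify-write, safe against concurrent updaters. */
static void atomic_set_bit(_Atomic unsigned long *word, int nr)
{
	atomic_fetch_or(word, 1UL << nr);
}

/* Like __set_bit(): a plain read-modify-write, fine for data nobody else sees yet. */
static void plain_set_bit(unsigned long *word, int nr)
{
	*word |= 1UL << nr;
}

int main(void)
{
	_Atomic unsigned long shared = 0;
	unsigned long local = 0;	/* e.g. a bitmap in a just-allocated object */

	atomic_set_bit(&shared, 3);
	plain_set_bit(&local, 3);
	printf("shared=%#lx local=%#lx\n", (unsigned long)shared, local);
	return 0;
}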
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44b60c852107..84bfde64a337 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
 }
 
 static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
 
 	mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
 }
 
 static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
 		return;
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_disable_fixed(hwc, idx);
+		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
-	x86_pmu_disable_event(hwc, idx);
+	x86_pmu_disable_event(event);
 }
 
 static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
 	int err;
 
@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		if (!__get_cpu_var(cpu_hw_events).enabled)
 			return;
 
@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_enable_fixed(hwc, idx);
+		intel_pmu_enable_fixed(hwc);
 		return;
 	}
 
-	__x86_pmu_enable_event(hwc, idx);
+	__x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -694,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  */
 static int intel_pmu_save_and_restart(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-	int ret;
-
-	x86_perf_event_update(event, hwc, idx);
-	ret = x86_perf_event_set_period(event, hwc, idx);
-
-	return ret;
+	x86_perf_event_update(event);
+	return x86_perf_event_set_period(event);
 }
 
 static void intel_pmu_reset(void)
@@ -745,11 +743,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	intel_pmu_disable_all();
 	intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
 	if (!status) {
-		perf_enable();
+		intel_pmu_enable_all();
 		return 0;
 	}
 
@@ -759,8 +757,7 @@ again:
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();
 		intel_pmu_reset();
-		perf_enable();
-		return 1;
+		goto done;
 	}
 
 	inc_irq_stat(apic_perf_irqs);
@@ -768,7 +765,6 @@ again:
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
-		clear_bit(bit, (unsigned long *) &status);
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -778,7 +774,7 @@ again:
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			intel_pmu_disable_event(&event->hw, bit);
+			x86_pmu_stop(event);
 	}
 
 	intel_pmu_ack_status(ack);
@@ -790,8 +786,8 @@ again:
 	if (status)
 		goto again;
 
-	perf_enable();
-
+done:
+	intel_pmu_enable_all();
 	return 1;
 }
 
@@ -870,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = {
 	.max_period = (1ULL << 31) - 1,
 	.enable_bts = intel_pmu_enable_bts,
 	.disable_bts = intel_pmu_disable_bts,
-	.get_event_constraints = intel_get_event_constraints
+	.get_event_constraints = intel_get_event_constraints,
+
+	.cpu_starting = init_debug_store_on_cpu,
+	.cpu_dying = fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a4e67b99d91c..a330485d14da 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
 }
 
 static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+p6_pmu_disable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val = P6_NOP_EVENT;
 
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base + idx, val);
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void p6_pmu_enable_event(struct perf_event *event)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
 	u64 val;
 
 	val = hwc->config;
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base + idx, val);
+	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
 static __initconst struct x86_pmu p6_pmu = {
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 4fd1420faffa..29e5f7c845b2 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -29,4 +29,19 @@ struct stack_frame {
 	struct stack_frame *next_frame;
 	unsigned long return_address;
 };
+
+static inline unsigned long rewind_frame_pointer(int n)
+{
+	struct stack_frame *frame;
+
+	get_bp(frame);
+
+#ifdef CONFIG_FRAME_POINTER
+	while (n--)
+		frame = frame->next_frame;
 #endif
+
+	return (unsigned long)frame;
+}
+
+#endif /* DUMPSTACK_H */
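rewind_frame_pointer() above walks saved frame pointers so that perf_arch_fetch_caller_regs() in perf_event.c can skip the frames it adds itself (hence the skip + 1 there). Below is a rough user-space analogue under the assumption that frame pointers are kept (build with -O0 or -fno-omit-frame-pointer); it mirrors the struct stack_frame layout but is not the kernel helper, and walking frames like this is only meaningful on such builds.

#include <stdio.h>

/* Saved frame pointer followed by the return address, as on x86 with
 * frame pointers enabled -- mirrors the kernel's struct stack_frame. */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

/* Start from this function's own frame and follow n saved frame pointers up. */
static __attribute__((noinline)) unsigned long rewind_frame_pointer(int n)
{
	struct stack_frame *frame = __builtin_frame_address(0);

	while (n--)
		frame = frame->next_frame;

	return (unsigned long)frame;
}

static void show(void)
{
	/* One hop up from the helper's own frame lands on show()'s frame. */
	printf("show()'s frame is near %#lx\n", rewind_frame_pointer(1));
}

int main(void)
{
	show();
	return 0;
}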
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index d5e2a2ebb627..272c9f1f05f3 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -208,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
 			if (ops->stack(data, "IRQ") < 0)
 				break;
-			bp = print_context_stack(tinfo, stack, bp,
+			bp = ops->walk_stack(tinfo, stack, bp,
 				ops, data, irq_stack_end, &graph);
 			/*
 			 * We link to the next stack (which would be
@@ -229,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	/*
 	 * This handles the process stack:
 	 */
-	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
 	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);