-rw-r--r--  arch/arm/kernel/perf_event.c              |  3
-rw-r--r--  arch/powerpc/kernel/perf_event.c          |  3
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c  |  8
-rw-r--r--  arch/sh/kernel/perf_event.c               | 11
-rw-r--r--  arch/sparc/kernel/perf_event.c            |  3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c          | 22
-rw-r--r--  include/linux/perf_event.h                | 20
-rw-r--r--  kernel/perf_event.c                       | 37
8 files changed, 48 insertions(+), 59 deletions(-)
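
Every hunk below follows the same pattern: the perf_disable()/perf_enable() pair that the generic code in kernel/perf_event.c used to take around the pmu callbacks moves into the callbacks themselves. As a rough illustration (not part of the patch), a pmu->enable() implementation ends up shaped like the arm/sh hunks below; example_get_counter() and example_program_counter() are placeholder names standing in for each architecture's counter-allocation and programming code:

/* Illustrative sketch only -- condensed from the arm/sh ->enable() hunks below. */
static int example_pmu_enable(struct perf_event *event)
{
        int idx, err = 0;

        perf_disable();                         /* the core no longer disables the PMU for us */

        idx = example_get_counter(event);       /* placeholder: get_event_idx()/used_mask logic */
        if (idx < 0) {
                err = idx;
                goto out;
        }

        example_program_counter(event, idx);    /* placeholder: program the hardware counter */
        perf_event_update_userpage(event);
out:
        perf_enable();                          /* always balance the disable, even on error */
        return err;
}
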
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f62f9db35db3..afc92c580d18 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event)
         int idx;
         int err = 0;
 
+        perf_disable();
+
         /* If we don't have a space for the counter then finish early. */
         idx = armpmu->get_event_idx(cpuc, hwc);
         if (idx < 0) {
@@ -303,6 +305,7 @@ armpmu_enable(struct perf_event *event)
         perf_event_update_userpage(event);
 
 out:
+        perf_enable();
         return err;
 }
 
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 19131b2614b9..c1408821dbc2 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+        perf_disable();
         cpuhw->group_flag |= PERF_EVENT_TXN;
         cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
 }
 
 /*
@@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
                 cpuhw->event[i]->hw.config = cpuhw->events[i];
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
         return 0;
 }
 
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index ea6a804e43fd..9bc84a7fd901 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count,
         return n;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static int fsl_emb_pmu_enable(struct perf_event *event)
 {
         struct cpu_hw_events *cpuhw;
@@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
         u64 val;
         int i;
 
+        perf_disable();
         cpuhw = &get_cpu_var(cpu_hw_events);
 
         if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
         ret = 0;
  out:
         put_cpu_var(cpu_hw_events);
+        perf_enable();
         return ret;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static void fsl_emb_pmu_disable(struct perf_event *event)
 {
         struct cpu_hw_events *cpuhw;
         int i = event->hw.idx;
 
+        perf_disable();
         if (i < 0)
                 goto out;
 
@@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
         cpuhw->n_events--;
 
  out:
+        perf_enable();
         put_cpu_var(cpu_hw_events);
 }
 
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 8cb206597e0c..d042989ceb45 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
+        int ret = -EAGAIN;
+
+        perf_disable();
 
         if (test_and_set_bit(idx, cpuc->used_mask)) {
                 idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                 if (idx == sh_pmu->num_events)
-                        return -EAGAIN;
+                        goto out;
 
                 set_bit(idx, cpuc->used_mask);
                 hwc->idx = idx;
@@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event)
         sh_pmu->enable(hwc, idx);
 
         perf_event_update_userpage(event);
-
-        return 0;
+        ret = 0;
+out:
+        perf_enable();
+        return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index bed4327f5a7a..d0131deeeaf6 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1113,6 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+        perf_disable();
         cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1126,6 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
 }
 
 /*
@@ -1149,6 +1151,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
                 return -EAGAIN;
 
         cpuc->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
         return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2c89264ee791..846070ce49c3 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -969,10 +969,11 @@ static int x86_pmu_enable(struct perf_event *event)
 
         hwc = &event->hw;
 
+        perf_disable();
         n0 = cpuc->n_events;
-        n = collect_events(cpuc, event, false);
-        if (n < 0)
-                return n;
+        ret = n = collect_events(cpuc, event, false);
+        if (ret < 0)
+                goto out;
 
         /*
          * If group events scheduling transaction was started,
@@ -980,23 +981,26 @@ static int x86_pmu_enable(struct perf_event *event)
          * at commit time(->commit_txn) as a whole
          */
         if (cpuc->group_flag & PERF_EVENT_TXN)
-                goto out;
+                goto done_collect;
 
         ret = x86_pmu.schedule_events(cpuc, n, assign);
         if (ret)
-                return ret;
+                goto out;
         /*
          * copy new assignment, now we know it is possible
          * will be used by hw_perf_enable()
          */
         memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
         cpuc->n_events = n;
         cpuc->n_added += n - n0;
         cpuc->n_txn += n - n0;
 
-        return 0;
+        ret = 0;
+out:
+        perf_enable();
+        return ret;
 }
 
 static int x86_pmu_start(struct perf_event *event)
@@ -1432,6 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+        perf_disable();
         cpuc->group_flag |= PERF_EVENT_TXN;
         cpuc->n_txn = 0;
 }
@@ -1451,6 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
          */
         cpuc->n_added -= cpuc->n_txn;
         cpuc->n_events -= cpuc->n_txn;
+        perf_enable();
 }
 
 /*
@@ -1480,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
         memcpy(cpuc->assign, assign, n*sizeof(int));
 
         cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+        perf_enable();
         return 0;
 }
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ab72f56eb372..243286a8ded7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -564,26 +564,26 @@ struct pmu {
         struct list_head entry;
 
         /*
-         * Should return -ENOENT when the @event doesn't match this pmu
+         * Should return -ENOENT when the @event doesn't match this PMU.
          */
         int (*event_init) (struct perf_event *event);
 
         int (*enable) (struct perf_event *event);
         void (*disable) (struct perf_event *event);
         int (*start) (struct perf_event *event);
         void (*stop) (struct perf_event *event);
         void (*read) (struct perf_event *event);
         void (*unthrottle) (struct perf_event *event);
 
         /*
-         * Group events scheduling is treated as a transaction, add group
-         * events as a whole and perform one schedulability test. If the test
-         * fails, roll back the whole group
+         * Group events scheduling is treated as a transaction, add
+         * group events as a whole and perform one schedulability test.
+         * If the test fails, roll back the whole group
          */
 
         /*
-         * Start the transaction, after this ->enable() doesn't need
-         * to do schedulability tests.
+         * Start the transaction, after this ->enable() doesn't need to
+         * do schedulability tests.
          */
         void (*start_txn) (struct pmu *pmu);
         /*
@@ -594,8 +594,8 @@ struct pmu {
          */
         int (*commit_txn) (struct pmu *pmu);
         /*
-         * Will cancel the transaction, assumes ->disable() is called for
-         * each successfull ->enable() during the transaction.
+         * Will cancel the transaction, assumes ->disable() is called
+         * for each successfull ->enable() during the transaction.
          */
         void (*cancel_txn) (struct pmu *pmu);
 };
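
The struct pmu comments above describe the group-scheduling transaction that the arch hunks implement: with this patch, ->start_txn() also takes the PMU-wide disable, and ->commit_txn()/->cancel_txn() release it. A minimal sketch of the resulting callback trio, modelled on the sparc/x86/powerpc hunks (cpu_hw_events, PERF_EVENT_TXN, perf_disable() and perf_enable() come from this diff; example_check_constraints() is a placeholder for the per-arch schedulability test):

/* Illustrative sketch only -- modelled on the txn callbacks changed in this diff. */
static void example_pmu_start_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

        perf_disable();                         /* keep the PMU quiet for the whole group */
        cpuhw->group_flag |= PERF_EVENT_TXN;    /* ->enable() defers its schedulability test */
}

static int example_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

        if (example_check_constraints(cpuhw))   /* one schedulability test for the whole group */
                return -EAGAIN;                 /* core falls back to ->cancel_txn() */

        cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_enable();
        return 0;
}

static void example_pmu_cancel_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

        cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_enable();                          /* balances the perf_disable() in ->start_txn() */
}
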
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 149ca18371b7..9a98ce953561 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
                 return;
 
         raw_spin_lock(&ctx->lock);
-        /*
-         * Protect the list operation against NMI by disabling the
-         * events on a global level.
-         */
-        perf_disable();
 
         event_sched_out(event, cpuctx, ctx);
 
@@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
                                  perf_max_events - perf_reserved_percpu);
         }
 
-        perf_enable();
         raw_spin_unlock(&ctx->lock);
 }
 
@@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info)
         ctx->is_active = 1;
         update_context_time(ctx);
 
-        /*
-         * Protect the list operation against NMI by disabling the
-         * events on a global level. NOP for non NMI based events.
-         */
-        perf_disable();
-
         add_event_to_ctx(event, ctx);
 
         if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info)
                 cpuctx->max_pertask--;
 
 unlock:
-        perf_enable();
-
         raw_spin_unlock(&ctx->lock);
 }
 
@@ -972,12 +958,10 @@ static void __perf_event_enable(void *info)
         if (!group_can_go_on(event, cpuctx, 1)) {
                 err = -EEXIST;
         } else {
-                perf_disable();
                 if (event == leader)
                         err = group_sched_in(event, cpuctx, ctx);
                 else
                         err = event_sched_in(event, cpuctx, ctx);
-                perf_enable();
         }
 
         if (err) {
@@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                 goto out;
         update_context_time(ctx);
 
-        perf_disable();
         if (!ctx->nr_active)
-                goto out_enable;
+                goto out;
 
         if (event_type & EVENT_PINNED) {
                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
@@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                         group_sched_out(event, cpuctx, ctx);
         }
-
- out_enable:
-        perf_enable();
 out:
         raw_spin_unlock(&ctx->lock);
 }
@@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
         ctx->timestamp = perf_clock();
 
-        perf_disable();
-
         /*
          * First go through the list and put on any pinned groups
          * in order to give them the best chance of going on.
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx,
         if (event_type & EVENT_FLEXIBLE)
                 ctx_flexible_sched_in(ctx, cpuctx);
 
-        perf_enable();
 out:
         raw_spin_unlock(&ctx->lock);
 }
@@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
         if (cpuctx->task_ctx == ctx)
                 return;
 
-        perf_disable();
-
         /*
          * We want to keep the following priority order:
          * cpu pinned (that don't need to move), task pinned,
@@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
         ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
         cpuctx->task_ctx = ctx;
-
-        perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
         hwc->sample_period = sample_period;
 
         if (local64_read(&hwc->period_left) > 8*sample_period) {
-                perf_disable();
                 perf_event_stop(event);
                 local64_set(&hwc->period_left, 0);
                 perf_event_start(event);
-                perf_enable();
         }
 }
 
@@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                  */
                 if (interrupts == MAX_INTERRUPTS) {
                         perf_log_throttle(event, 1);
-                        perf_disable();
                         event->pmu->unthrottle(event);
-                        perf_enable();
                 }
 
                 if (!event->attr.freq || !event->attr.sample_freq)
                         continue;
 
-                perf_disable();
                 event->pmu->read(event);
                 now = local64_read(&event->count);
                 delta = now - hwc->freq_count_stamp;
@@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
                 if (delta > 0)
                         perf_adjust_period(event, TICK_NSEC, delta);
-                perf_enable();
         }
         raw_spin_unlock(&ctx->lock);
 }
@@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
         if (!rotate)
                 return;
 
-        perf_disable();
         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
                 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
                 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-        perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,