author     Frederic Weisbecker <fweisbec@gmail.com>   2010-01-09 15:05:28 -0500
committer  Frederic Weisbecker <fweisbec@gmail.com>   2010-01-16 06:30:28 -0500
commit     e286417378b4f9ce6e473b556193465ab22e12ab (patch)
tree       03da147ea654d6475ae3c739906c4e8f6804d0db /kernel/perf_event.c
parent     5908cdc85eb30f8d07f2cb11d4a62334d7229048 (diff)
perf: Round robin flexible groups of events using list_rotate_left()
This is more proper than doing it through a list_for_each_entry() that breaks after the first entry.

v2: Don't rotate pinned groups as it's not needed to time share them.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
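For reference, list_rotate_left() simply moves the first entry of a list to its tail, which is what the removed open-coded loop did for the flexible groups. A minimal sketch of that helper, paraphrased from the list helpers in include/linux/list.h (illustrative, not the authoritative source):

/* Sketch: rotate a list left, i.e. move its first entry to the tail. */
static inline void list_rotate_left(struct list_head *head)
{
	struct list_head *first;

	if (!list_empty(head)) {
		first = head->next;
		/* list_move_tail() removes 'first' and re-adds it just
		 * before 'head', i.e. at the end of the list. */
		list_move_tail(first, head);
	}
}

Applied to ctx->flexible_groups, one call per rotation moves the leading event group to the back, giving the same round-robin time sharing as the old loop with less code.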
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c | 17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c9f8a757649d..bbebe2832639 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1454,25 +1454,16 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	struct perf_event *event;
-
 	if (!ctx->nr_events)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	/*
-	 * Rotate the first entry last (works just fine for group events too):
-	 */
+
+	/* Rotate the first entry last of non-pinned groups */
 	perf_disable();
-	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
-		list_move_tail(&event->group_entry, &ctx->pinned_groups);
-		break;
-	}
 
-	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
-		list_move_tail(&event->group_entry, &ctx->flexible_groups);
-		break;
-	}
+	list_rotate_left(&ctx->flexible_groups);
+
 	perf_enable();
 
 	raw_spin_unlock(&ctx->lock);