aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2011-01-27 22:42:43 -0500
committerSteven Rostedt <rostedt@goodmis.org>2011-02-07 20:56:18 -0500
commitc9c53ca03d6f97fdd9832d5ed3f15b30ee5cdb86 (patch)
tree108e2ccd275a9a3d6ddbc7a91c9aabcf883dd5ec /kernel/trace
parent58d9a597c4275d830a819625e7d437cd6fb23fa5 (diff)
tracing/filter: Dynamically allocate preds
For every filter that is made, we create predicates to hold every operation within the filter. We have a max of 32 predicates that we can hold. Currently, we allocate all 32 even if we only need to use one. Part of the reason we do this is that the filter can be used at any moment by any event. Fortunately, the filter is only used with preemption disabled. By resetting the count of preds used "n_preds" to zero, then performing a synchronize_sched(), we can safely free and reallocate a new array of preds. Cc: Tom Zanussi <tzanussi@gmail.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--kernel/trace/trace.h3
-rw-r--r--kernel/trace/trace_events_filter.c143
2 files changed, 110 insertions, 36 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1597bc0749c1..441fc1bc85d6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -661,7 +661,8 @@ struct ftrace_event_field {
661}; 661};
662 662
663struct event_filter { 663struct event_filter {
664 int n_preds; 664 int n_preds; /* Number assigned */
665 int a_preds; /* allocated */
665 struct filter_pred **preds; 666 struct filter_pred **preds;
666 char *filter_string; 667 char *filter_string;
667}; 668};
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 5d719b340a2b..aac6a6183e6a 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -362,6 +362,7 @@ int filter_match_preds(struct event_filter *filter, void *rec)
362{ 362{
363 int match = -1, top = 0, val1 = 0, val2 = 0; 363 int match = -1, top = 0, val1 = 0, val2 = 0;
364 int stack[MAX_FILTER_PRED]; 364 int stack[MAX_FILTER_PRED];
365 struct filter_pred **preds;
365 struct filter_pred *pred; 366 struct filter_pred *pred;
366 int n_preds = ACCESS_ONCE(filter->n_preds); 367 int n_preds = ACCESS_ONCE(filter->n_preds);
367 int i; 368 int i;
@@ -370,8 +371,13 @@ int filter_match_preds(struct event_filter *filter, void *rec)
370 if (!n_preds) 371 if (!n_preds)
371 return 1; 372 return 1;
372 373
374 /*
375	 * n_preds and filter->preds are protected by preemption being disabled.
376 */
377 preds = rcu_dereference_sched(filter->preds);
378
373 for (i = 0; i < n_preds; i++) { 379 for (i = 0; i < n_preds; i++) {
374 pred = filter->preds[i]; 380 pred = preds[i];
375 if (!pred->pop_n) { 381 if (!pred->pop_n) {
376 match = pred->fn(pred, rec); 382 match = pred->fn(pred, rec);
377 stack[top++] = match; 383 stack[top++] = match;
@@ -548,46 +554,55 @@ static int filter_set_pred(struct filter_pred *dest,
548 return 0; 554 return 0;
549} 555}
550 556
557static void __free_preds(struct event_filter *filter)
558{
559 int i;
560
561 if (filter->preds) {
562 for (i = 0; i < filter->a_preds; i++) {
563 if (filter->preds[i])
564 filter_free_pred(filter->preds[i]);
565 }
566 kfree(filter->preds);
567 filter->preds = NULL;
568 }
569 filter->a_preds = 0;
570 filter->n_preds = 0;
571}
572
551static void filter_disable_preds(struct ftrace_event_call *call) 573static void filter_disable_preds(struct ftrace_event_call *call)
552{ 574{
553 struct event_filter *filter = call->filter; 575 struct event_filter *filter = call->filter;
554 int i; 576 int i;
555 577
556 call->flags &= ~TRACE_EVENT_FL_FILTERED; 578 call->flags &= ~TRACE_EVENT_FL_FILTERED;
579 if (filter->preds) {
580 for (i = 0; i < filter->n_preds; i++)
581 filter->preds[i]->fn = filter_pred_none;
582 }
557 filter->n_preds = 0; 583 filter->n_preds = 0;
558
559 for (i = 0; i < MAX_FILTER_PRED; i++)
560 filter->preds[i]->fn = filter_pred_none;
561} 584}
562 585
563static void __free_preds(struct event_filter *filter) 586static void __free_filter(struct event_filter *filter)
564{ 587{
565 int i;
566
567 if (!filter) 588 if (!filter)
568 return; 589 return;
569 590
570 for (i = 0; i < MAX_FILTER_PRED; i++) { 591 __free_preds(filter);
571 if (filter->preds[i])
572 filter_free_pred(filter->preds[i]);
573 }
574 kfree(filter->preds);
575 kfree(filter->filter_string); 592 kfree(filter->filter_string);
576 kfree(filter); 593 kfree(filter);
577} 594}
578 595
579void destroy_preds(struct ftrace_event_call *call) 596void destroy_preds(struct ftrace_event_call *call)
580{ 597{
581 __free_preds(call->filter); 598 __free_filter(call->filter);
582 call->filter = NULL; 599 call->filter = NULL;
583 call->flags &= ~TRACE_EVENT_FL_FILTERED; 600 call->flags &= ~TRACE_EVENT_FL_FILTERED;
584} 601}
585 602
586static struct event_filter *__alloc_preds(void) 603static struct event_filter *__alloc_filter(void)
587{ 604{
588 struct event_filter *filter; 605 struct event_filter *filter;
589 struct filter_pred *pred;
590 int i;
591 606
592 filter = kzalloc(sizeof(*filter), GFP_KERNEL); 607 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
593 if (!filter) 608 if (!filter)
@@ -595,32 +610,63 @@ static struct event_filter *__alloc_preds(void)
595 610
596 filter->n_preds = 0; 611 filter->n_preds = 0;
597 612
598 filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL); 613 return filter;
614}
615
616static int __alloc_preds(struct event_filter *filter, int n_preds)
617{
618 struct filter_pred *pred;
619 int i;
620
621 if (filter->preds) {
622 if (filter->a_preds < n_preds) {
623 /* We need to reallocate */
624 filter->n_preds = 0;
625 /*
626 * It is possible that the filter is currently
627 * being used. We need to zero out the number
628 * of preds, wait on preemption and then free
629 * the preds.
630 */
631 synchronize_sched();
632 __free_preds(filter);
633 }
634 }
635
636 if (!filter->preds) {
637 filter->preds =
638 kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
639 filter->a_preds = n_preds;
640 }
599 if (!filter->preds) 641 if (!filter->preds)
600 goto oom; 642 return -ENOMEM;
643
644 if (WARN_ON(filter->a_preds < n_preds))
645 return -EINVAL;
601 646
602 for (i = 0; i < MAX_FILTER_PRED; i++) { 647 for (i = 0; i < n_preds; i++) {
603 pred = kzalloc(sizeof(*pred), GFP_KERNEL); 648 pred = filter->preds[i];
649 if (!pred)
650 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
604 if (!pred) 651 if (!pred)
605 goto oom; 652 goto oom;
606 pred->fn = filter_pred_none; 653 pred->fn = filter_pred_none;
607 filter->preds[i] = pred; 654 filter->preds[i] = pred;
608 } 655 }
609 656
610 return filter; 657 return 0;
611 658 oom:
612oom:
613 __free_preds(filter); 659 __free_preds(filter);
614 return ERR_PTR(-ENOMEM); 660 return -ENOMEM;
615} 661}
616 662
617static int init_preds(struct ftrace_event_call *call) 663static int init_filter(struct ftrace_event_call *call)
618{ 664{
619 if (call->filter) 665 if (call->filter)
620 return 0; 666 return 0;
621 667
622 call->flags &= ~TRACE_EVENT_FL_FILTERED; 668 call->flags &= ~TRACE_EVENT_FL_FILTERED;
623 call->filter = __alloc_preds(); 669 call->filter = __alloc_filter();
624 if (IS_ERR(call->filter)) 670 if (IS_ERR(call->filter))
625 return PTR_ERR(call->filter); 671 return PTR_ERR(call->filter);
626 672
@@ -636,7 +682,7 @@ static int init_subsystem_preds(struct event_subsystem *system)
636 if (strcmp(call->class->system, system->name) != 0) 682 if (strcmp(call->class->system, system->name) != 0)
637 continue; 683 continue;
638 684
639 err = init_preds(call); 685 err = init_filter(call);
640 if (err) 686 if (err)
641 return err; 687 return err;
642 } 688 }
@@ -665,7 +711,7 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
665{ 711{
666 int idx, err; 712 int idx, err;
667 713
668 if (filter->n_preds == MAX_FILTER_PRED) { 714 if (WARN_ON(filter->n_preds == filter->a_preds)) {
669 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); 715 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
670 return -ENOSPC; 716 return -ENOSPC;
671 } 717 }
@@ -1179,6 +1225,20 @@ static int check_preds(struct filter_parse_state *ps)
1179 return 0; 1225 return 0;
1180} 1226}
1181 1227
1228static int count_preds(struct filter_parse_state *ps)
1229{
1230 struct postfix_elt *elt;
1231 int n_preds = 0;
1232
1233 list_for_each_entry(elt, &ps->postfix, list) {
1234 if (elt->op == OP_NONE)
1235 continue;
1236 n_preds++;
1237 }
1238
1239 return n_preds;
1240}
1241
1182static int replace_preds(struct ftrace_event_call *call, 1242static int replace_preds(struct ftrace_event_call *call,
1183 struct event_filter *filter, 1243 struct event_filter *filter,
1184 struct filter_parse_state *ps, 1244 struct filter_parse_state *ps,
@@ -1191,10 +1251,23 @@ static int replace_preds(struct ftrace_event_call *call,
1191 int err; 1251 int err;
1192 int n_preds = 0; 1252 int n_preds = 0;
1193 1253
1254 n_preds = count_preds(ps);
1255 if (n_preds >= MAX_FILTER_PRED) {
1256 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1257 return -ENOSPC;
1258 }
1259
1194 err = check_preds(ps); 1260 err = check_preds(ps);
1195 if (err) 1261 if (err)
1196 return err; 1262 return err;
1197 1263
1264 if (!dry_run) {
1265 err = __alloc_preds(filter, n_preds);
1266 if (err)
1267 return err;
1268 }
1269
1270 n_preds = 0;
1198 list_for_each_entry(elt, &ps->postfix, list) { 1271 list_for_each_entry(elt, &ps->postfix, list) {
1199 if (elt->op == OP_NONE) { 1272 if (elt->op == OP_NONE) {
1200 if (!operand1) 1273 if (!operand1)
@@ -1208,7 +1281,7 @@ static int replace_preds(struct ftrace_event_call *call,
1208 continue; 1281 continue;
1209 } 1282 }
1210 1283
1211 if (n_preds++ == MAX_FILTER_PRED) { 1284 if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
1212 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); 1285 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1213 return -ENOSPC; 1286 return -ENOSPC;
1214 } 1287 }
@@ -1283,7 +1356,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1283 1356
1284 mutex_lock(&event_mutex); 1357 mutex_lock(&event_mutex);
1285 1358
1286 err = init_preds(call); 1359 err = init_filter(call);
1287 if (err) 1360 if (err)
1288 goto out_unlock; 1361 goto out_unlock;
1289 1362
@@ -1376,7 +1449,7 @@ void ftrace_profile_free_filter(struct perf_event *event)
1376 struct event_filter *filter = event->filter; 1449 struct event_filter *filter = event->filter;
1377 1450
1378 event->filter = NULL; 1451 event->filter = NULL;
1379 __free_preds(filter); 1452 __free_filter(filter);
1380} 1453}
1381 1454
1382int ftrace_profile_set_filter(struct perf_event *event, int event_id, 1455int ftrace_profile_set_filter(struct perf_event *event, int event_id,
@@ -1402,7 +1475,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1402 if (event->filter) 1475 if (event->filter)
1403 goto out_unlock; 1476 goto out_unlock;
1404 1477
1405 filter = __alloc_preds(); 1478 filter = __alloc_filter();
1406 if (IS_ERR(filter)) { 1479 if (IS_ERR(filter)) {
1407 err = PTR_ERR(filter); 1480 err = PTR_ERR(filter);
1408 goto out_unlock; 1481 goto out_unlock;
@@ -1411,7 +1484,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1411 err = -ENOMEM; 1484 err = -ENOMEM;
1412 ps = kzalloc(sizeof(*ps), GFP_KERNEL); 1485 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1413 if (!ps) 1486 if (!ps)
1414 goto free_preds; 1487 goto free_filter;
1415 1488
1416 parse_init(ps, filter_ops, filter_str); 1489 parse_init(ps, filter_ops, filter_str);
1417 err = filter_parse(ps); 1490 err = filter_parse(ps);
@@ -1427,9 +1500,9 @@ free_ps:
1427 postfix_clear(ps); 1500 postfix_clear(ps);
1428 kfree(ps); 1501 kfree(ps);
1429 1502
1430free_preds: 1503free_filter:
1431 if (err) 1504 if (err)
1432 __free_preds(filter); 1505 __free_filter(filter);
1433 1506
1434out_unlock: 1507out_unlock:
1435 mutex_unlock(&event_mutex); 1508 mutex_unlock(&event_mutex);