Diffstat (limited to 'kernel')
-rw-r--r--   kernel/hrtimer.c          |  2
-rw-r--r--   kernel/perf_event.c       | 34
-rw-r--r--   kernel/trace/blktrace.c   | 39
-rw-r--r--   kernel/trace/ftrace.c     |  4
-rw-r--r--   kernel/trace/kmemtrace.c  |  2
5 files changed, 53 insertions, 28 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6d7020490f94..3e1c36e7998f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
726 /* "Retrigger" the interrupt to get things going */ 726 /* "Retrigger" the interrupt to get things going */
727 retrigger_next_event(NULL); 727 retrigger_next_event(NULL);
728 local_irq_restore(flags); 728 local_irq_restore(flags);
729 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
730 smp_processor_id());
731 return 1; 729 return 1;
732} 730}
733 731
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb6db0c..e491fb087939 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1030,14 +1030,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active) {
-		list_for_each_entry(event, &ctx->group_list, group_entry) {
-			if (event != event->group_leader)
-				event_sched_out(event, cpuctx, ctx);
-			else
-				group_sched_out(event, cpuctx, ctx);
-		}
-	}
+	if (ctx->nr_active)
+		list_for_each_entry(event, &ctx->group_list, group_entry)
+			group_sched_out(event, cpuctx, ctx);
+
 	perf_enable();
  out:
 	spin_unlock(&ctx->lock);
@@ -1258,12 +1254,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 		if (event->cpu != -1 && event->cpu != cpu)
 			continue;
 
-		if (event != event->group_leader)
-			event_sched_in(event, cpuctx, ctx, cpu);
-		else {
-			if (group_can_go_on(event, cpuctx, 1))
-				group_sched_in(event, cpuctx, ctx, cpu);
-		}
+		if (group_can_go_on(event, cpuctx, 1))
+			group_sched_in(event, cpuctx, ctx, cpu);
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1283,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 		if (event->cpu != -1 && event->cpu != cpu)
 			continue;
 
-		if (event != event->group_leader) {
-			if (event_sched_in(event, cpuctx, ctx, cpu))
-				can_add_hw = 0;
-		} else {
-			if (group_can_go_on(event, cpuctx, can_add_hw)) {
-				if (group_sched_in(event, cpuctx, ctx, cpu))
-					can_add_hw = 0;
-			}
-		}
+		if (group_can_go_on(event, cpuctx, can_add_hw))
+			if (group_sched_in(event, cpuctx, ctx, cpu))
+				can_add_hw = 0;
 	}
 	perf_enable();
  out:
@@ -4781,9 +4767,7 @@ int perf_event_init_task(struct task_struct *child)
 	 * We dont have to disable NMIs - we are only looking at
 	 * the list, not manipulating it:
 	 */
-	list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) {
-		if (event != event->group_leader)
-			continue;
+	list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
 
 		if (!event->attr.inherit) {
 			inherited_all = 0;
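
Background for the kernel/perf_event.c hunks: once every event is reachable from its group leader on ctx->group_list, the callers no longer need the event != event->group_leader special cases, because group_sched_out()/group_sched_in() already walk the leader's sibling_list. Below is a rough sketch of that helper, reconstructed from the perf code of this era; the exact state checks are omitted and the names are assumptions based on that code base, not part of this diff.

static void group_sched_out_sketch(struct perf_event *group_event,
				   struct perf_cpu_context *cpuctx,
				   struct perf_event_context *ctx)
{
	struct perf_event *event;

	/* schedule out the leader itself... */
	event_sched_out(group_event, cpuctx, ctx);

	/* ...then every sibling hanging off the leader */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);
}
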
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 }
 
 /**
+ * blk_add_trace_rq_remap - Add a trace for a request-remap operation
+ * @q: queue the io is for
+ * @rq: the source request
+ * @dev: target device
+ * @from: source sector
+ *
+ * Description:
+ *     Device mapper remaps request to other devices.
+ *     Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_rq_remap(struct request_queue *q,
+				   struct request *rq, dev_t dev,
+				   sector_t from)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device_from = cpu_to_be32(dev);
+	r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+	r.sector_from = cpu_to_be64(from);
+
+	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+			sizeof(r), &r);
+}
+
+/**
  * blk_add_driver_data - Add binary message with driver-specific data
  * @q: queue the io is for
  * @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_remap(blk_add_trace_remap);
 	WARN_ON(ret);
+	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
+	WARN_ON(ret);
 }
 
 static void blk_unregister_tracepoints(void)
 {
+	unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
 	unregister_trace_block_remap(blk_add_trace_remap);
 	unregister_trace_block_split(blk_add_trace_split);
 	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
 	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
 }
 
+void blk_trace_remove_sysfs(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
+}
+
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 
 #ifdef CONFIG_EVENT_TRACING
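
The new blk_add_trace_rq_remap() handler only produces events once something fires the block_rq_remap tracepoint; its prototype implies a tracepoint signature of (q, rq, dev, from). A hypothetical call site in a request-based remapping driver (device mapper is the case the kernel-doc mentions) might look like the sketch below; the wrapper name and arguments are illustrative, not part of this diff.

#include <linux/blkdev.h>
#include <trace/events/block.h>

/*
 * Sketch only: per the handler above, @orig_dev is recorded as device_from
 * and @orig_sector as sector_from, while rq->rq_disk and blk_rq_pos(rq) at
 * call time supply the "to" side -- so the trace is emitted after the
 * request has been retargeted at the lower device.
 */
static void example_trace_rq_remap(struct request *rq, dev_t orig_dev,
				   sector_t orig_sector)
{
	trace_block_rq_remap(rq->q, rq, orig_dev, orig_sector);
}
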
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592feab5a6..3724756e41ca 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
 	if (ftrace_trace_function == ftrace_stub)
 		return;
 
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	func = ftrace_trace_function;
+#else
+	func = __ftrace_trace_function;
+#endif
 
 	if (ftrace_pid_trace) {
 		set_ftrace_pid_function(func);
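
Context for the ftrace.c hunk: when the architecture's mcount stub cannot test function_trace_stop itself (CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST unset), ftrace_trace_function is left pointing at a C wrapper and the real callback lives in __ftrace_trace_function, which is why ftrace_update_pid_func() must read that variable in the #else branch. A rough sketch of that wrapper, reconstructed from the ftrace code of this era and not part of this diff:

static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	/* do in C the test the arch's mcount stub could not do */
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
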
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
 		return 1;
 	}
 
-	if (!register_tracer(&kmem_tracer)) {
+	if (register_tracer(&kmem_tracer) != 0) {
 		pr_warning("Warning: could not register the kmem tracer\n");
 		return 1;
 	}