author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-30 10:26:45 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-30 10:26:45 -0400
commit		05cd2544f4b64d9a9eca0d170191867495e01feb
tree		10146fb79bba464bac6f620d03b8cd50285065b7 /kernel
parent		27f680adc65ca08bf72ed85d3a48d1ee70f77c7a
parent		a0601c8944dc08c2d349c24bd9c0b09c406229fc
Merge branch 'fix' of git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6
Diffstat (limited to 'kernel')
 kernel/Makefile                   |   4
 kernel/irq/proc.c                 |   2
 kernel/lockdep.c                  |  17
 kernel/printk.c                   |  39
 kernel/resource.c                 |   2
 kernel/sched.c                    |   3
 kernel/sched_fair.c               | 169
 kernel/sched_idletask.c           |   5
 kernel/sched_rt.c                 |   5
 kernel/sysctl.c                   |   2
 kernel/trace/Kconfig              |  27
 kernel/trace/Makefile             |   6
 kernel/trace/ftrace.c             | 608
 kernel/trace/ring_buffer.c        |   6
 kernel/trace/trace.c              |  15
 kernel/trace/trace.h              |   2
 kernel/trace/trace_functions.c    |   2
 kernel/trace/trace_irqsoff.c      |   4
 kernel/trace/trace_sched_wakeup.c |   4
 kernel/trace/trace_selftest.c     |  18
 kernel/trace/trace_stack.c        |   4
 kernel/tracepoint.c               |   8
 22 files changed, 284 insertions(+), 668 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 305f11dbef21..9a3ec66a9d84 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -13,7 +13,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 
 CFLAGS_REMOVE_sched.o = -mno-spe
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
 CFLAGS_REMOVE_lockdep.o = -pg
 CFLAGS_REMOVE_lockdep_proc.o = -pg
@@ -88,7 +88,7 @@ obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_FTRACE) += trace/
+obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index fac014a81b24..4d161c70ba55 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -220,7 +220,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 	}
 }
 
-void register_default_affinity_proc(void)
+static void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
 	proc_create("irq/default_smp_affinity", 0600, NULL,
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index dbda475b13bd..06e157119d2b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2169,12 +2169,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
-	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, a0);
+	time_hardirqs_on(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2188,7 +2187,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
-	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
@@ -2224,11 +2222,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, a0);
+	time_hardirqs_off(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2241,7 +2239,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
 	 * We have done an ON -> OFF transition:
 	 */
 	curr->hardirqs_enabled = 0;
-	curr->hardirq_disable_ip = _RET_IP_;
+	curr->hardirq_disable_ip = ip;
 	curr->hardirq_disable_event = ++curr->irq_events;
 	debug_atomic_inc(&hardirqs_off_events);
 } else
@@ -3417,9 +3415,10 @@ retry:
 		}
 		printk(" ignoring it.\n");
 		unlock = 0;
+	} else {
+		if (count != 10)
+			printk(KERN_CONT " locked it.\n");
 	}
-	if (count != 10)
-		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
 		/*
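The lockdep hunks above stop re-deriving the caller address inside trace_hardirqs_on_caller()/trace_hardirqs_off_caller() (via __builtin_return_address(0) or _RET_IP_) and instead trust the ip passed down by the outer wrapper, so the recorded address names the original irq-enable/disable site rather than the wrapper frame. A minimal userspace sketch of that pattern, assuming GCC/Clang for __builtin_return_address(); trace_on() and on_caller() are hypothetical stand-ins, not kernel functions:

#include <stdio.h>

static unsigned long last_enable_ip;

/* Stand-in for trace_hardirqs_on_caller(): trusts the ip it is given
 * instead of calling __builtin_return_address(0) itself, which would
 * name its own caller (the wrapper), not the original site. */
static void on_caller(unsigned long ip)
{
	last_enable_ip = ip;
}

/* Stand-in for trace_hardirqs_on(): captures the interesting return
 * address once, at the outermost entry point, and threads it through. */
__attribute__((noinline)) static void trace_on(void)
{
	on_caller((unsigned long)__builtin_return_address(0));
}

int main(void)
{
	trace_on();	/* this call site is what gets recorded */
	printf("recorded ip: %#lx\n", last_enable_ip);
	return 0;
}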
diff --git a/kernel/printk.c b/kernel/printk.c
index 6341af77eb65..f492f1583d77 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -233,45 +233,6 @@ static inline void boot_delay_msec(void)
 #endif
 
 /*
- * Return the number of unread characters in the log buffer.
- */
-static int log_buf_get_len(void)
-{
-	return logged_chars;
-}
-
-/*
- * Copy a range of characters from the log buffer.
- */
-int log_buf_copy(char *dest, int idx, int len)
-{
-	int ret, max;
-	bool took_lock = false;
-
-	if (!oops_in_progress) {
-		spin_lock_irq(&logbuf_lock);
-		took_lock = true;
-	}
-
-	max = log_buf_get_len();
-	if (idx < 0 || idx >= max) {
-		ret = -1;
-	} else {
-		if (len > max)
-			len = max;
-		ret = len;
-		idx += (log_end - max);
-		while (len-- > 0)
-			dest[len] = LOG_BUF(idx + len);
-	}
-
-	if (took_lock)
-		spin_unlock_irq(&logbuf_lock);
-
-	return ret;
-}
-
-/*
  * Commands to do_syslog:
  *
  * 0 -- Close the log.  Currently a NOP.
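The removed log_buf_copy() is a compact example of circular-buffer index math: the logical index is shifted by (log_end - logged_chars) so that index 0 names the oldest character still retained. A small userspace model of the same arithmetic, assuming a power-of-two buffer as printk's LOG_BUF() macro does; every name here is a stand-in:

#include <stdio.h>

#define LOG_BUF_LEN 16			/* must be a power of two */
#define LOG_BUF(idx) (log_buf[(idx) & (LOG_BUF_LEN - 1)])

static char log_buf[LOG_BUF_LEN];
static unsigned log_end;		/* one past the newest char written */
static unsigned logged_chars;		/* how many chars are still valid */

static void log_putc(char c)
{
	LOG_BUF(log_end++) = c;
	if (logged_chars < LOG_BUF_LEN)
		logged_chars++;
}

/* Same index arithmetic as the removed log_buf_copy(): shift the
 * logical index so that idx 0 is the oldest retained character. */
static int log_copy(char *dest, int idx, int len)
{
	int max = logged_chars;

	if (idx < 0 || idx >= max)
		return -1;
	if (len > max)
		len = max;
	idx += log_end - max;
	while (len-- > 0)
		dest[len] = LOG_BUF(idx + len);
	return 0;
}

int main(void)
{
	char out[LOG_BUF_LEN + 1] = { 0 };
	const char *msg = "abcdefghijklmnopqrst";	/* 20 chars > 16: wraps */

	while (*msg)
		log_putc(*msg++);
	if (log_copy(out, 0, logged_chars) == 0)
		printf("oldest retained: %s\n", out);	/* efghijklmnopqrst */
	return 0;
}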
diff --git a/kernel/resource.c b/kernel/resource.c
index 4089d12af6e0..7fec0e427234 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -571,7 +571,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 
 }
 
-void reserve_region_with_split(struct resource *root,
+void __init reserve_region_with_split(struct resource *root,
 		resource_size_t start, resource_size_t end,
 		const char *name)
 {
diff --git a/kernel/sched.c b/kernel/sched.c
index 6625c3c4b10d..e8819bc6f462 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
@@ -3344,7 +3343,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);
 
-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 				busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
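The small_imbalance change drops the 2* factor, so the move-one-task shortcut now needs the load gap plus one task's worth of load, not two, to reach busiest_load_per_task * imbn; the shortcut therefore fires less eagerly. A hedged numeric check of the two conditions (the load figures below are invented for illustration only):

#include <stdio.h>

int main(void)
{
	/* Invented figures: a 200-unit load gap with 800-unit tasks. */
	unsigned long max_load = 1200;			/* busiest CPU */
	unsigned long this_load = 1000;			/* this CPU */
	unsigned long busiest_load_per_task = 800;
	unsigned long imbn = 2;				/* imbalance factor */

	/* Old test, with the extra busiest_load_per_task added in. */
	int old_cond = max_load - this_load + 2 * busiest_load_per_task >=
		       busiest_load_per_task * imbn;

	/* New test, as in the hunk above. */
	int new_cond = max_load - this_load + busiest_load_per_task >=
		       busiest_load_per_task * imbn;

	printf("old: %d, new: %d\n", old_cond, new_cond);	/* old: 1, new: 0 */
	return 0;
}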
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9573c33688b8..ce514afd78ff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -143,6 +143,49 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return se->parent;
 }
 
+/* return depth at which a sched entity is present in the hierarchy */
+static inline int depth_se(struct sched_entity *se)
+{
+	int depth = 0;
+
+	for_each_sched_entity(se)
+		depth++;
+
+	return depth;
+}
+
+static void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+	int se_depth, pse_depth;
+
+	/*
+	 * preemption test can be made between sibling entities who are in the
+	 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
+	 * both tasks until we find their ancestors who are siblings of common
+	 * parent.
+	 */
+
+	/* First walk up until both entities are at same depth */
+	se_depth = depth_se(*se);
+	pse_depth = depth_se(*pse);
+
+	while (se_depth > pse_depth) {
+		se_depth--;
+		*se = parent_entity(*se);
+	}
+
+	while (pse_depth > se_depth) {
+		pse_depth--;
+		*pse = parent_entity(*pse);
+	}
+
+	while (!is_same_group(*se, *pse)) {
+		*se = parent_entity(*se);
+		*pse = parent_entity(*pse);
+	}
+}
+
 #else	/* CONFIG_FAIR_GROUP_SCHED */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
@@ -193,6 +236,11 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 	return NULL;
 }
 
+static inline void
+find_matching_se(struct sched_entity **se, struct sched_entity **pse)
+{
+}
+
 #endif	/* CONFIG_FAIR_GROUP_SCHED */
 
 
@@ -223,6 +271,27 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return se->vruntime - cfs_rq->min_vruntime;
 }
 
+static void update_min_vruntime(struct cfs_rq *cfs_rq)
+{
+	u64 vruntime = cfs_rq->min_vruntime;
+
+	if (cfs_rq->curr)
+		vruntime = cfs_rq->curr->vruntime;
+
+	if (cfs_rq->rb_leftmost) {
+		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
+						   struct sched_entity,
+						   run_node);
+
+		if (vruntime == cfs_rq->min_vruntime)
+			vruntime = se->vruntime;
+		else
+			vruntime = min_vruntime(vruntime, se->vruntime);
+	}
+
+	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+}
+
 /*
  * Enqueue an entity into the rb-tree:
  */
@@ -256,15 +325,8 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Maintain a cache of leftmost tree entries (it is frequently
 	 * used):
 	 */
-	if (leftmost) {
+	if (leftmost)
 		cfs_rq->rb_leftmost = &se->run_node;
-		/*
-		 * maintain cfs_rq->min_vruntime to be a monotonic increasing
-		 * value tracking the leftmost vruntime in the tree.
-		 */
-		cfs_rq->min_vruntime =
-			max_vruntime(cfs_rq->min_vruntime, se->vruntime);
-	}
 
 	rb_link_node(&se->run_node, parent, link);
 	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
@@ -274,18 +336,9 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->rb_leftmost == &se->run_node) {
 		struct rb_node *next_node;
-		struct sched_entity *next;
 
 		next_node = rb_next(&se->run_node);
 		cfs_rq->rb_leftmost = next_node;
-
-		if (next_node) {
-			next = rb_entry(next_node,
-					struct sched_entity, run_node);
-			cfs_rq->min_vruntime =
-				max_vruntime(cfs_rq->min_vruntime,
-					     next->vruntime);
-		}
 	}
 
 	if (cfs_rq->next == se)
@@ -424,6 +477,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 	curr->vruntime += delta_exec_weighted;
+	update_min_vruntime(cfs_rq);
 }
 
 static void update_curr(struct cfs_rq *cfs_rq)
@@ -613,13 +667,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-	u64 vruntime;
-
-	if (first_fair(cfs_rq)) {
-		vruntime = min_vruntime(cfs_rq->min_vruntime,
-				__pick_next_entity(cfs_rq)->vruntime);
-	} else
-		vruntime = cfs_rq->min_vruntime;
+	u64 vruntime = cfs_rq->min_vruntime;
 
 	/*
 	 * The 'current' period is already promised to the current tasks,
@@ -696,6 +744,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
+	update_min_vruntime(cfs_rq);
 }
 
 /*
@@ -742,16 +791,14 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct rq *rq = rq_of(cfs_rq);
-	u64 pair_slice = rq->clock - cfs_rq->pair_start;
-
-	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
-		cfs_rq->pair_start = rq->clock;
+	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
 		return se;
-	}
 
 	return cfs_rq->next;
 }
@@ -1122,10 +1169,9 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (!sync && sched_feat(SYNC_WAKEUPS) &&
-	    curr->se.avg_overlap < sysctl_sched_migration_cost &&
-	    p->se.avg_overlap < sysctl_sched_migration_cost)
-		sync = 1;
+	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
+			p->se.avg_overlap > sysctl_sched_migration_cost))
+		sync = 0;
 
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
@@ -1244,13 +1290,42 @@ static unsigned long wakeup_gran(struct sched_entity *se)
 	 * More easily preempt - nice tasks, while not making it harder for
 	 * + nice tasks.
 	 */
-	if (sched_feat(ASYM_GRAN))
-		gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
+	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
+		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
 
 	return gran;
 }
 
 /*
+ * Should 'se' preempt 'curr'.
+ *
+ *             |s1
+ *        |s2
+ *   |s3
+ *         g
+ *      |<--->|c
+ *
+ *  w(c, s1) = -1
+ *  w(c, s2) =  0
+ *  w(c, s3) =  1
+ *
+ */
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+	s64 gran, vdiff = curr->vruntime - se->vruntime;
+
+	if (vdiff <= 0)
+		return -1;
+
+	gran = wakeup_gran(curr);
+	if (vdiff > gran)
+		return 1;
+
+	return 0;
+}
+
+/*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
@@ -1258,7 +1333,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
-	s64 delta_exec;
 
 	if (unlikely(rt_prio(p->prio))) {
 		update_rq_clock(rq);
@@ -1296,9 +1370,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 	}
 
-	delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
-	if (delta_exec > wakeup_gran(pse))
-		resched_task(curr);
+	find_matching_se(&se, &pse);
+
+	while (se) {
+		BUG_ON(!pse);
+
+		if (wakeup_preempt_entity(se, pse) == 1) {
+			resched_task(curr);
+			break;
+		}
+
+		se = parent_entity(se);
+		pse = parent_entity(pse);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1594,9 +1678,6 @@ static const struct sched_class fair_sched_class = {
 	.enqueue_task		= enqueue_task_fair,
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
-#ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_fair,
-#endif /* CONFIG_SMP */
 
 	.check_preempt_curr	= check_preempt_wakeup,
 
@@ -1604,6 +1685,8 @@ static const struct sched_class fair_sched_class = {
 	.put_prev_task		= put_prev_task_fair,
 
 #ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_fair,
+
 	.load_balance		= load_balance_fair,
 	.move_one_task		= move_one_task_fair,
 #endif
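wakeup_preempt_entity() above returns the three-valued w(c, s) sketched in its comment block: -1 when the waking entity is not ahead of current at all, 0 when it is ahead but within the wakeup granularity, and 1 when it is ahead by more than the granularity and should preempt. The same decision can be exercised outside the kernel; the following standalone model substitutes a fixed granularity for wakeup_gran() and raw vruntime values for sched_entity pointers:

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

/* Stand-in for wakeup_gran(): a fixed granularity in vruntime units. */
static s64 wakeup_gran(void)
{
	return 1000;
}

/*
 * Mirror of the wakeup_preempt_entity() logic from the hunk above:
 *   -1: se is not ahead of curr at all
 *    0: se is ahead, but within the wakeup granularity
 *    1: se is ahead by more than the granularity -> preempt
 */
static int wakeup_preempt(u64 curr_vruntime, u64 se_vruntime)
{
	s64 gran, vdiff = (s64)(curr_vruntime - se_vruntime);

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran();
	if (vdiff > gran)
		return 1;

	return 0;
}

int main(void)
{
	/* s1 behind curr, s2 within gran, s3 beyond gran (see diagram). */
	printf("w(c, s1) = %d\n", wakeup_preempt(5000, 6000));	/* -1 */
	printf("w(c, s2) = %d\n", wakeup_preempt(5000, 4500));	/*  0 */
	printf("w(c, s3) = %d\n", wakeup_preempt(5000, 3000));	/*  1 */
	return 0;
}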
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index dec4ccabe2f5..8a21a2e28c13 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -105,9 +105,6 @@ static const struct sched_class idle_sched_class = {
 
 	/* dequeue is not valid, we print a debug message there: */
 	.dequeue_task		= dequeue_task_idle,
-#ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_idle,
-#endif /* CONFIG_SMP */
 
 	.check_preempt_curr	= check_preempt_curr_idle,
 
@@ -115,6 +112,8 @@ static const struct sched_class idle_sched_class = {
 	.put_prev_task		= put_prev_task_idle,
 
 #ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_idle,
+
 	.load_balance		= load_balance_idle,
 	.move_one_task		= move_one_task_idle,
 #endif
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b446dc87494f..d9ba9d5f99d6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1504,9 +1504,6 @@ static const struct sched_class rt_sched_class = {
 	.enqueue_task		= enqueue_task_rt,
 	.dequeue_task		= dequeue_task_rt,
 	.yield_task		= yield_task_rt,
-#ifdef CONFIG_SMP
-	.select_task_rq		= select_task_rq_rt,
-#endif /* CONFIG_SMP */
 
 	.check_preempt_curr	= check_preempt_curr_rt,
 
@@ -1514,6 +1511,8 @@ static const struct sched_class rt_sched_class = {
 	.put_prev_task		= put_prev_task_rt,
 
 #ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_rt,
+
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index a13bd4dfaeb1..9d048fa2d902 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -474,7 +474,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "ftrace_enabled",
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1cb3e1f616af..e0cea282e0c5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,11 +1,12 @@
 #
-# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+# Architectures that offer an FUNCTION_TRACER implementation should
+# select HAVE_FUNCTION_TRACER:
 #
 
 config NOP_TRACER
 	bool
 
-config HAVE_FTRACE
+config HAVE_FUNCTION_TRACER
 	bool
 	select NOP_TRACER
 
@@ -28,9 +29,11 @@ config TRACING
 	select STACKTRACE
 	select TRACEPOINTS
 
-config FTRACE
+menu "Tracers"
+
+config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
 	select TRACING
@@ -49,7 +52,6 @@ config IRQSOFF_TRACER
 	default n
 	depends on TRACE_IRQFLAGS_SUPPORT
 	depends on GENERIC_TIME
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACE_IRQFLAGS
 	select TRACING
@@ -73,7 +75,6 @@ config PREEMPT_TRACER
 	default n
 	depends on GENERIC_TIME
 	depends on PREEMPT
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select TRACER_MAX_TRACE
@@ -101,7 +102,6 @@ config SYSPROF_TRACER
 
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
@@ -112,7 +112,6 @@ config SCHED_TRACER
 
 config CONTEXT_SWITCH_TRACER
 	bool "Trace process context switches"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
 	select MARKERS
@@ -122,9 +121,9 @@ config CONTEXT_SWITCH_TRACER
 
 config BOOT_TRACER
 	bool "Trace boot initcalls"
-	depends on HAVE_FTRACE
 	depends on DEBUG_KERNEL
 	select TRACING
+	select CONTEXT_SWITCH_TRACER
 	help
 	  This tracer helps developers to optimize boot times: it records
 	  the timings of the initcalls and traces key events and the identity
@@ -141,9 +140,9 @@ config BOOT_TRACER
 
 config STACK_TRACER
 	bool "Trace max stack"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
-	select FTRACE
+	select FUNCTION_TRACER
 	select STACKTRACE
 	help
 	  This special tracer records the maximum stack footprint of the
@@ -160,7 +159,7 @@ config STACK_TRACER
 
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
-	depends on FTRACE
+	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	depends on DEBUG_KERNEL
 	default y
@@ -170,7 +169,7 @@ config DYNAMIC_FTRACE
 	 with a No-Op instruction) as they are called. A table is
 	 created to dynamically enable them again.
 
-	 This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+	 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
	 has native performance as long as no tracing is active.
 
	 The changes to the code are done by a kernel thread that
@@ -195,3 +194,5 @@ config FTRACE_STARTUP_TEST
	  a series of tests are made to verify that the tracer is
	  functioning properly. It will do tests on all the configured
	  tracers of ftrace.
+
+endmenu
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index a85dfba88ba0..c8228b1a49e9 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,7 +1,7 @@
 
 # Do not instrument the tracer itself:
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 
@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
 
-obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
-obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4dda4f60a2a9..7618c528756b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -25,13 +25,24 @@ | |||
| 25 | #include <linux/ftrace.h> | 25 | #include <linux/ftrace.h> |
| 26 | #include <linux/sysctl.h> | 26 | #include <linux/sysctl.h> |
| 27 | #include <linux/ctype.h> | 27 | #include <linux/ctype.h> |
| 28 | #include <linux/hash.h> | ||
| 29 | #include <linux/list.h> | 28 | #include <linux/list.h> |
| 30 | 29 | ||
| 31 | #include <asm/ftrace.h> | 30 | #include <asm/ftrace.h> |
| 32 | 31 | ||
| 33 | #include "trace.h" | 32 | #include "trace.h" |
| 34 | 33 | ||
| 34 | #define FTRACE_WARN_ON(cond) \ | ||
| 35 | do { \ | ||
| 36 | if (WARN_ON(cond)) \ | ||
| 37 | ftrace_kill(); \ | ||
| 38 | } while (0) | ||
| 39 | |||
| 40 | #define FTRACE_WARN_ON_ONCE(cond) \ | ||
| 41 | do { \ | ||
| 42 | if (WARN_ON_ONCE(cond)) \ | ||
| 43 | ftrace_kill(); \ | ||
| 44 | } while (0) | ||
| 45 | |||
| 35 | /* ftrace_enabled is a method to turn ftrace on or off */ | 46 | /* ftrace_enabled is a method to turn ftrace on or off */ |
| 36 | int ftrace_enabled __read_mostly; | 47 | int ftrace_enabled __read_mostly; |
| 37 | static int last_ftrace_enabled; | 48 | static int last_ftrace_enabled; |
| @@ -153,21 +164,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
| 153 | } | 164 | } |
| 154 | 165 | ||
| 155 | #ifdef CONFIG_DYNAMIC_FTRACE | 166 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 156 | |||
| 157 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 167 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
| 158 | /* | 168 | # error Dynamic ftrace depends on MCOUNT_RECORD |
| 159 | * The hash lock is only needed when the recording of the mcount | ||
| 160 | * callers are dynamic. That is, by the caller themselves and | ||
| 161 | * not recorded via the compilation. | ||
| 162 | */ | ||
| 163 | static DEFINE_SPINLOCK(ftrace_hash_lock); | ||
| 164 | #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) | ||
| 165 | #define ftrace_hash_unlock(flags) \ | ||
| 166 | spin_unlock_irqrestore(&ftrace_hash_lock, flags) | ||
| 167 | #else | ||
| 168 | /* This is protected via the ftrace_lock with MCOUNT_RECORD. */ | ||
| 169 | #define ftrace_hash_lock(flags) do { (void)(flags); } while (0) | ||
| 170 | #define ftrace_hash_unlock(flags) do { } while(0) | ||
| 171 | #endif | 169 | #endif |
| 172 | 170 | ||
| 173 | /* | 171 | /* |
| @@ -178,8 +176,6 @@ static DEFINE_SPINLOCK(ftrace_hash_lock); | |||
| 178 | */ | 176 | */ |
| 179 | static unsigned long mcount_addr = MCOUNT_ADDR; | 177 | static unsigned long mcount_addr = MCOUNT_ADDR; |
| 180 | 178 | ||
| 181 | static struct task_struct *ftraced_task; | ||
| 182 | |||
| 183 | enum { | 179 | enum { |
| 184 | FTRACE_ENABLE_CALLS = (1 << 0), | 180 | FTRACE_ENABLE_CALLS = (1 << 0), |
| 185 | FTRACE_DISABLE_CALLS = (1 << 1), | 181 | FTRACE_DISABLE_CALLS = (1 << 1), |
| @@ -190,13 +186,9 @@ enum { | |||
| 190 | 186 | ||
| 191 | static int ftrace_filtered; | 187 | static int ftrace_filtered; |
| 192 | static int tracing_on; | 188 | static int tracing_on; |
| 193 | static int frozen_record_count; | ||
| 194 | 189 | ||
| 195 | static struct hlist_head ftrace_hash[FTRACE_HASHSIZE]; | 190 | static LIST_HEAD(ftrace_new_addrs); |
| 196 | 191 | ||
| 197 | static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); | ||
| 198 | |||
| 199 | static DEFINE_MUTEX(ftraced_lock); | ||
| 200 | static DEFINE_MUTEX(ftrace_regex_lock); | 192 | static DEFINE_MUTEX(ftrace_regex_lock); |
| 201 | 193 | ||
| 202 | struct ftrace_page { | 194 | struct ftrace_page { |
| @@ -214,16 +206,13 @@ struct ftrace_page { | |||
| 214 | static struct ftrace_page *ftrace_pages_start; | 206 | static struct ftrace_page *ftrace_pages_start; |
| 215 | static struct ftrace_page *ftrace_pages; | 207 | static struct ftrace_page *ftrace_pages; |
| 216 | 208 | ||
| 217 | static int ftraced_trigger; | ||
| 218 | static int ftraced_suspend; | ||
| 219 | static int ftraced_stop; | ||
| 220 | |||
| 221 | static int ftrace_record_suspend; | ||
| 222 | |||
| 223 | static struct dyn_ftrace *ftrace_free_records; | 209 | static struct dyn_ftrace *ftrace_free_records; |
| 224 | 210 | ||
| 225 | 211 | ||
| 226 | #ifdef CONFIG_KPROBES | 212 | #ifdef CONFIG_KPROBES |
| 213 | |||
| 214 | static int frozen_record_count; | ||
| 215 | |||
| 227 | static inline void freeze_record(struct dyn_ftrace *rec) | 216 | static inline void freeze_record(struct dyn_ftrace *rec) |
| 228 | { | 217 | { |
| 229 | if (!(rec->flags & FTRACE_FL_FROZEN)) { | 218 | if (!(rec->flags & FTRACE_FL_FROZEN)) { |
| @@ -250,72 +239,6 @@ static inline int record_frozen(struct dyn_ftrace *rec) | |||
| 250 | # define record_frozen(rec) ({ 0; }) | 239 | # define record_frozen(rec) ({ 0; }) |
| 251 | #endif /* CONFIG_KPROBES */ | 240 | #endif /* CONFIG_KPROBES */ |
| 252 | 241 | ||
| 253 | int skip_trace(unsigned long ip) | ||
| 254 | { | ||
| 255 | unsigned long fl; | ||
| 256 | struct dyn_ftrace *rec; | ||
| 257 | struct hlist_node *t; | ||
| 258 | struct hlist_head *head; | ||
| 259 | |||
| 260 | if (frozen_record_count == 0) | ||
| 261 | return 0; | ||
| 262 | |||
| 263 | head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)]; | ||
| 264 | hlist_for_each_entry_rcu(rec, t, head, node) { | ||
| 265 | if (rec->ip == ip) { | ||
| 266 | if (record_frozen(rec)) { | ||
| 267 | if (rec->flags & FTRACE_FL_FAILED) | ||
| 268 | return 1; | ||
| 269 | |||
| 270 | if (!(rec->flags & FTRACE_FL_CONVERTED)) | ||
| 271 | return 1; | ||
| 272 | |||
| 273 | if (!tracing_on || !ftrace_enabled) | ||
| 274 | return 1; | ||
| 275 | |||
| 276 | if (ftrace_filtered) { | ||
| 277 | fl = rec->flags & (FTRACE_FL_FILTER | | ||
| 278 | FTRACE_FL_NOTRACE); | ||
| 279 | if (!fl || (fl & FTRACE_FL_NOTRACE)) | ||
| 280 | return 1; | ||
| 281 | } | ||
| 282 | } | ||
| 283 | break; | ||
| 284 | } | ||
| 285 | } | ||
| 286 | |||
| 287 | return 0; | ||
| 288 | } | ||
| 289 | |||
| 290 | static inline int | ||
| 291 | ftrace_ip_in_hash(unsigned long ip, unsigned long key) | ||
| 292 | { | ||
| 293 | struct dyn_ftrace *p; | ||
| 294 | struct hlist_node *t; | ||
| 295 | int found = 0; | ||
| 296 | |||
| 297 | hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) { | ||
| 298 | if (p->ip == ip) { | ||
| 299 | found = 1; | ||
| 300 | break; | ||
| 301 | } | ||
| 302 | } | ||
| 303 | |||
| 304 | return found; | ||
| 305 | } | ||
| 306 | |||
| 307 | static inline void | ||
| 308 | ftrace_add_hash(struct dyn_ftrace *node, unsigned long key) | ||
| 309 | { | ||
| 310 | hlist_add_head_rcu(&node->node, &ftrace_hash[key]); | ||
| 311 | } | ||
| 312 | |||
| 313 | /* called from kstop_machine */ | ||
| 314 | static inline void ftrace_del_hash(struct dyn_ftrace *node) | ||
| 315 | { | ||
| 316 | hlist_del(&node->node); | ||
| 317 | } | ||
| 318 | |||
| 319 | static void ftrace_free_rec(struct dyn_ftrace *rec) | 242 | static void ftrace_free_rec(struct dyn_ftrace *rec) |
| 320 | { | 243 | { |
| 321 | rec->ip = (unsigned long)ftrace_free_records; | 244 | rec->ip = (unsigned long)ftrace_free_records; |
| @@ -346,7 +269,6 @@ void ftrace_release(void *start, unsigned long size) | |||
| 346 | } | 269 | } |
| 347 | } | 270 | } |
| 348 | spin_unlock(&ftrace_lock); | 271 | spin_unlock(&ftrace_lock); |
| 349 | |||
| 350 | } | 272 | } |
| 351 | 273 | ||
| 352 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | 274 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) |
| @@ -358,10 +280,8 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | |||
| 358 | rec = ftrace_free_records; | 280 | rec = ftrace_free_records; |
| 359 | 281 | ||
| 360 | if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { | 282 | if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { |
| 361 | WARN_ON_ONCE(1); | 283 | FTRACE_WARN_ON_ONCE(1); |
| 362 | ftrace_free_records = NULL; | 284 | ftrace_free_records = NULL; |
| 363 | ftrace_disabled = 1; | ||
| 364 | ftrace_enabled = 0; | ||
| 365 | return NULL; | 285 | return NULL; |
| 366 | } | 286 | } |
| 367 | 287 | ||
| @@ -371,76 +291,36 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | |||
| 371 | } | 291 | } |
| 372 | 292 | ||
| 373 | if (ftrace_pages->index == ENTRIES_PER_PAGE) { | 293 | if (ftrace_pages->index == ENTRIES_PER_PAGE) { |
| 374 | if (!ftrace_pages->next) | 294 | if (!ftrace_pages->next) { |
| 375 | return NULL; | 295 | /* allocate another page */ |
| 296 | ftrace_pages->next = | ||
| 297 | (void *)get_zeroed_page(GFP_KERNEL); | ||
| 298 | if (!ftrace_pages->next) | ||
| 299 | return NULL; | ||
| 300 | } | ||
| 376 | ftrace_pages = ftrace_pages->next; | 301 | ftrace_pages = ftrace_pages->next; |
| 377 | } | 302 | } |
| 378 | 303 | ||
| 379 | return &ftrace_pages->records[ftrace_pages->index++]; | 304 | return &ftrace_pages->records[ftrace_pages->index++]; |
| 380 | } | 305 | } |
| 381 | 306 | ||
| 382 | static void | 307 | static struct dyn_ftrace * |
| 383 | ftrace_record_ip(unsigned long ip) | 308 | ftrace_record_ip(unsigned long ip) |
| 384 | { | 309 | { |
| 385 | struct dyn_ftrace *node; | 310 | struct dyn_ftrace *rec; |
| 386 | unsigned long flags; | ||
| 387 | unsigned long key; | ||
| 388 | int resched; | ||
| 389 | int cpu; | ||
| 390 | 311 | ||
| 391 | if (!ftrace_enabled || ftrace_disabled) | 312 | if (!ftrace_enabled || ftrace_disabled) |
| 392 | return; | 313 | return NULL; |
| 393 | |||
| 394 | resched = need_resched(); | ||
| 395 | preempt_disable_notrace(); | ||
| 396 | |||
| 397 | /* | ||
| 398 | * We simply need to protect against recursion. | ||
| 399 | * Use the the raw version of smp_processor_id and not | ||
| 400 | * __get_cpu_var which can call debug hooks that can | ||
| 401 | * cause a recursive crash here. | ||
| 402 | */ | ||
| 403 | cpu = raw_smp_processor_id(); | ||
| 404 | per_cpu(ftrace_shutdown_disable_cpu, cpu)++; | ||
| 405 | if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1) | ||
| 406 | goto out; | ||
| 407 | 314 | ||
| 408 | if (unlikely(ftrace_record_suspend)) | 315 | rec = ftrace_alloc_dyn_node(ip); |
| 409 | goto out; | 316 | if (!rec) |
| 410 | 317 | return NULL; | |
| 411 | key = hash_long(ip, FTRACE_HASHBITS); | ||
| 412 | |||
| 413 | WARN_ON_ONCE(key >= FTRACE_HASHSIZE); | ||
| 414 | |||
| 415 | if (ftrace_ip_in_hash(ip, key)) | ||
| 416 | goto out; | ||
| 417 | |||
| 418 | ftrace_hash_lock(flags); | ||
| 419 | |||
| 420 | /* This ip may have hit the hash before the lock */ | ||
| 421 | if (ftrace_ip_in_hash(ip, key)) | ||
| 422 | goto out_unlock; | ||
| 423 | |||
| 424 | node = ftrace_alloc_dyn_node(ip); | ||
| 425 | if (!node) | ||
| 426 | goto out_unlock; | ||
| 427 | |||
| 428 | node->ip = ip; | ||
| 429 | 318 | ||
| 430 | ftrace_add_hash(node, key); | 319 | rec->ip = ip; |
| 431 | 320 | ||
| 432 | ftraced_trigger = 1; | 321 | list_add(&rec->list, &ftrace_new_addrs); |
| 433 | 322 | ||
| 434 | out_unlock: | 323 | return rec; |
| 435 | ftrace_hash_unlock(flags); | ||
| 436 | out: | ||
| 437 | per_cpu(ftrace_shutdown_disable_cpu, cpu)--; | ||
| 438 | |||
| 439 | /* prevent recursion with scheduler */ | ||
| 440 | if (resched) | ||
| 441 | preempt_enable_no_resched_notrace(); | ||
| 442 | else | ||
| 443 | preempt_enable_notrace(); | ||
| 444 | } | 324 | } |
| 445 | 325 | ||
| 446 | #define FTRACE_ADDR ((long)(ftrace_caller)) | 326 | #define FTRACE_ADDR ((long)(ftrace_caller)) |
| @@ -559,7 +439,6 @@ static void ftrace_replace_code(int enable) | |||
| 559 | rec->flags |= FTRACE_FL_FAILED; | 439 | rec->flags |= FTRACE_FL_FAILED; |
| 560 | if ((system_state == SYSTEM_BOOTING) || | 440 | if ((system_state == SYSTEM_BOOTING) || |
| 561 | !core_kernel_text(rec->ip)) { | 441 | !core_kernel_text(rec->ip)) { |
| 562 | ftrace_del_hash(rec); | ||
| 563 | ftrace_free_rec(rec); | 442 | ftrace_free_rec(rec); |
| 564 | } | 443 | } |
| 565 | } | 444 | } |
| @@ -567,15 +446,6 @@ static void ftrace_replace_code(int enable) | |||
| 567 | } | 446 | } |
| 568 | } | 447 | } |
| 569 | 448 | ||
| 570 | static void ftrace_shutdown_replenish(void) | ||
| 571 | { | ||
| 572 | if (ftrace_pages->next) | ||
| 573 | return; | ||
| 574 | |||
| 575 | /* allocate another page */ | ||
| 576 | ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); | ||
| 577 | } | ||
| 578 | |||
| 579 | static void print_ip_ins(const char *fmt, unsigned char *p) | 449 | static void print_ip_ins(const char *fmt, unsigned char *p) |
| 580 | { | 450 | { |
| 581 | int i; | 451 | int i; |
| @@ -591,23 +461,23 @@ ftrace_code_disable(struct dyn_ftrace *rec) | |||
| 591 | { | 461 | { |
| 592 | unsigned long ip; | 462 | unsigned long ip; |
| 593 | unsigned char *nop, *call; | 463 | unsigned char *nop, *call; |
| 594 | int failed; | 464 | int ret; |
| 595 | 465 | ||
| 596 | ip = rec->ip; | 466 | ip = rec->ip; |
| 597 | 467 | ||
| 598 | nop = ftrace_nop_replace(); | 468 | nop = ftrace_nop_replace(); |
| 599 | call = ftrace_call_replace(ip, mcount_addr); | 469 | call = ftrace_call_replace(ip, mcount_addr); |
| 600 | 470 | ||
| 601 | failed = ftrace_modify_code(ip, call, nop); | 471 | ret = ftrace_modify_code(ip, call, nop); |
| 602 | if (failed) { | 472 | if (ret) { |
| 603 | switch (failed) { | 473 | switch (ret) { |
| 604 | case 1: | 474 | case -EFAULT: |
| 605 | WARN_ON_ONCE(1); | 475 | FTRACE_WARN_ON_ONCE(1); |
| 606 | pr_info("ftrace faulted on modifying "); | 476 | pr_info("ftrace faulted on modifying "); |
| 607 | print_ip_sym(ip); | 477 | print_ip_sym(ip); |
| 608 | break; | 478 | break; |
| 609 | case 2: | 479 | case -EINVAL: |
| 610 | WARN_ON_ONCE(1); | 480 | FTRACE_WARN_ON_ONCE(1); |
| 611 | pr_info("ftrace failed to modify "); | 481 | pr_info("ftrace failed to modify "); |
| 612 | print_ip_sym(ip); | 482 | print_ip_sym(ip); |
| 613 | print_ip_ins(" expected: ", call); | 483 | print_ip_ins(" expected: ", call); |
| @@ -615,6 +485,15 @@ ftrace_code_disable(struct dyn_ftrace *rec) | |||
| 615 | print_ip_ins(" replace: ", nop); | 485 | print_ip_ins(" replace: ", nop); |
| 616 | printk(KERN_CONT "\n"); | 486 | printk(KERN_CONT "\n"); |
| 617 | break; | 487 | break; |
| 488 | case -EPERM: | ||
| 489 | FTRACE_WARN_ON_ONCE(1); | ||
| 490 | pr_info("ftrace faulted on writing "); | ||
| 491 | print_ip_sym(ip); | ||
| 492 | break; | ||
| 493 | default: | ||
| 494 | FTRACE_WARN_ON_ONCE(1); | ||
| 495 | pr_info("ftrace faulted on unknown error "); | ||
| 496 | print_ip_sym(ip); | ||
| 618 | } | 497 | } |
| 619 | 498 | ||
| 620 | rec->flags |= FTRACE_FL_FAILED; | 499 | rec->flags |= FTRACE_FL_FAILED; |
| @@ -623,19 +502,11 @@ ftrace_code_disable(struct dyn_ftrace *rec) | |||
| 623 | return 1; | 502 | return 1; |
| 624 | } | 503 | } |
| 625 | 504 | ||
| 626 | static int __ftrace_update_code(void *ignore); | ||
| 627 | |||
| 628 | static int __ftrace_modify_code(void *data) | 505 | static int __ftrace_modify_code(void *data) |
| 629 | { | 506 | { |
| 630 | unsigned long addr; | ||
| 631 | int *command = data; | 507 | int *command = data; |
| 632 | 508 | ||
| 633 | if (*command & FTRACE_ENABLE_CALLS) { | 509 | if (*command & FTRACE_ENABLE_CALLS) { |
| 634 | /* | ||
| 635 | * Update any recorded ips now that we have the | ||
| 636 | * machine stopped | ||
| 637 | */ | ||
| 638 | __ftrace_update_code(NULL); | ||
| 639 | ftrace_replace_code(1); | 510 | ftrace_replace_code(1); |
| 640 | tracing_on = 1; | 511 | tracing_on = 1; |
| 641 | } else if (*command & FTRACE_DISABLE_CALLS) { | 512 | } else if (*command & FTRACE_DISABLE_CALLS) { |
| @@ -646,14 +517,6 @@ static int __ftrace_modify_code(void *data) | |||
| 646 | if (*command & FTRACE_UPDATE_TRACE_FUNC) | 517 | if (*command & FTRACE_UPDATE_TRACE_FUNC) |
| 647 | ftrace_update_ftrace_func(ftrace_trace_function); | 518 | ftrace_update_ftrace_func(ftrace_trace_function); |
| 648 | 519 | ||
| 649 | if (*command & FTRACE_ENABLE_MCOUNT) { | ||
| 650 | addr = (unsigned long)ftrace_record_ip; | ||
| 651 | ftrace_mcount_set(&addr); | ||
| 652 | } else if (*command & FTRACE_DISABLE_MCOUNT) { | ||
| 653 | addr = (unsigned long)ftrace_stub; | ||
| 654 | ftrace_mcount_set(&addr); | ||
| 655 | } | ||
| 656 | |||
| 657 | return 0; | 520 | return 0; |
| 658 | } | 521 | } |
| 659 | 522 | ||
| @@ -662,26 +525,9 @@ static void ftrace_run_update_code(int command) | |||
| 662 | stop_machine(__ftrace_modify_code, &command, NULL); | 525 | stop_machine(__ftrace_modify_code, &command, NULL); |
| 663 | } | 526 | } |
| 664 | 527 | ||
| 665 | void ftrace_disable_daemon(void) | ||
| 666 | { | ||
| 667 | /* Stop the daemon from calling kstop_machine */ | ||
| 668 | mutex_lock(&ftraced_lock); | ||
| 669 | ftraced_stop = 1; | ||
| 670 | mutex_unlock(&ftraced_lock); | ||
| 671 | |||
| 672 | ftrace_force_update(); | ||
| 673 | } | ||
| 674 | |||
| 675 | void ftrace_enable_daemon(void) | ||
| 676 | { | ||
| 677 | mutex_lock(&ftraced_lock); | ||
| 678 | ftraced_stop = 0; | ||
| 679 | mutex_unlock(&ftraced_lock); | ||
| 680 | |||
| 681 | ftrace_force_update(); | ||
| 682 | } | ||
| 683 | |||
| 684 | static ftrace_func_t saved_ftrace_func; | 528 | static ftrace_func_t saved_ftrace_func; |
| 529 | static int ftrace_start; | ||
| 530 | static DEFINE_MUTEX(ftrace_start_lock); | ||
| 685 | 531 | ||
| 686 | static void ftrace_startup(void) | 532 | static void ftrace_startup(void) |
| 687 | { | 533 | { |
| @@ -690,9 +536,9 @@ static void ftrace_startup(void) | |||
| 690 | if (unlikely(ftrace_disabled)) | 536 | if (unlikely(ftrace_disabled)) |
| 691 | return; | 537 | return; |
| 692 | 538 | ||
| 693 | mutex_lock(&ftraced_lock); | 539 | mutex_lock(&ftrace_start_lock); |
| 694 | ftraced_suspend++; | 540 | ftrace_start++; |
| 695 | if (ftraced_suspend == 1) | 541 | if (ftrace_start == 1) |
| 696 | command |= FTRACE_ENABLE_CALLS; | 542 | command |= FTRACE_ENABLE_CALLS; |
| 697 | 543 | ||
| 698 | if (saved_ftrace_func != ftrace_trace_function) { | 544 | if (saved_ftrace_func != ftrace_trace_function) { |
| @@ -705,7 +551,7 @@ static void ftrace_startup(void) | |||
| 705 | 551 | ||
| 706 | ftrace_run_update_code(command); | 552 | ftrace_run_update_code(command); |
| 707 | out: | 553 | out: |
| 708 | mutex_unlock(&ftraced_lock); | 554 | mutex_unlock(&ftrace_start_lock); |
| 709 | } | 555 | } |
| 710 | 556 | ||
| 711 | static void ftrace_shutdown(void) | 557 | static void ftrace_shutdown(void) |
| @@ -715,9 +561,9 @@ static void ftrace_shutdown(void) | |||
| 715 | if (unlikely(ftrace_disabled)) | 561 | if (unlikely(ftrace_disabled)) |
| 716 | return; | 562 | return; |
| 717 | 563 | ||
| 718 | mutex_lock(&ftraced_lock); | 564 | mutex_lock(&ftrace_start_lock); |
| 719 | ftraced_suspend--; | 565 | ftrace_start--; |
| 720 | if (!ftraced_suspend) | 566 | if (!ftrace_start) |
| 721 | command |= FTRACE_DISABLE_CALLS; | 567 | command |= FTRACE_DISABLE_CALLS; |
| 722 | 568 | ||
| 723 | if (saved_ftrace_func != ftrace_trace_function) { | 569 | if (saved_ftrace_func != ftrace_trace_function) { |
| @@ -730,7 +576,7 @@ static void ftrace_shutdown(void) | |||
| 730 | 576 | ||
| 731 | ftrace_run_update_code(command); | 577 | ftrace_run_update_code(command); |
| 732 | out: | 578 | out: |
| 733 | mutex_unlock(&ftraced_lock); | 579 | mutex_unlock(&ftrace_start_lock); |
| 734 | } | 580 | } |
| 735 | 581 | ||
| 736 | static void ftrace_startup_sysctl(void) | 582 | static void ftrace_startup_sysctl(void) |
| @@ -740,15 +586,15 @@ static void ftrace_startup_sysctl(void) | |||
| 740 | if (unlikely(ftrace_disabled)) | 586 | if (unlikely(ftrace_disabled)) |
| 741 | return; | 587 | return; |
| 742 | 588 | ||
| 743 | mutex_lock(&ftraced_lock); | 589 | mutex_lock(&ftrace_start_lock); |
| 744 | /* Force update next time */ | 590 | /* Force update next time */ |
| 745 | saved_ftrace_func = NULL; | 591 | saved_ftrace_func = NULL; |
| 746 | /* ftraced_suspend is true if we want ftrace running */ | 592 | /* ftrace_start is true if we want ftrace running */ |
| 747 | if (ftraced_suspend) | 593 | if (ftrace_start) |
| 748 | command |= FTRACE_ENABLE_CALLS; | 594 | command |= FTRACE_ENABLE_CALLS; |
| 749 | 595 | ||
| 750 | ftrace_run_update_code(command); | 596 | ftrace_run_update_code(command); |
| 751 | mutex_unlock(&ftraced_lock); | 597 | mutex_unlock(&ftrace_start_lock); |
| 752 | } | 598 | } |
| 753 | 599 | ||
| 754 | static void ftrace_shutdown_sysctl(void) | 600 | static void ftrace_shutdown_sysctl(void) |
| @@ -758,112 +604,50 @@ static void ftrace_shutdown_sysctl(void) | |||
| 758 | if (unlikely(ftrace_disabled)) | 604 | if (unlikely(ftrace_disabled)) |
| 759 | return; | 605 | return; |
| 760 | 606 | ||
| 761 | mutex_lock(&ftraced_lock); | 607 | mutex_lock(&ftrace_start_lock); |
| 762 | /* ftraced_suspend is true if ftrace is running */ | 608 | /* ftrace_start is true if ftrace is running */ |
| 763 | if (ftraced_suspend) | 609 | if (ftrace_start) |
| 764 | command |= FTRACE_DISABLE_CALLS; | 610 | command |= FTRACE_DISABLE_CALLS; |
| 765 | 611 | ||
| 766 | ftrace_run_update_code(command); | 612 | ftrace_run_update_code(command); |
| 767 | mutex_unlock(&ftraced_lock); | 613 | mutex_unlock(&ftrace_start_lock); |
| 768 | } | 614 | } |
| 769 | 615 | ||
| 770 | static cycle_t ftrace_update_time; | 616 | static cycle_t ftrace_update_time; |
| 771 | static unsigned long ftrace_update_cnt; | 617 | static unsigned long ftrace_update_cnt; |
| 772 | unsigned long ftrace_update_tot_cnt; | 618 | unsigned long ftrace_update_tot_cnt; |
| 773 | 619 | ||
| 774 | static int __ftrace_update_code(void *ignore) | 620 | static int ftrace_update_code(void) |
| 775 | { | 621 | { |
| 776 | int i, save_ftrace_enabled; | 622 | struct dyn_ftrace *p, *t; |
| 777 | cycle_t start, stop; | 623 | cycle_t start, stop; |
| 778 | struct dyn_ftrace *p; | ||
| 779 | struct hlist_node *t, *n; | ||
| 780 | struct hlist_head *head, temp_list; | ||
| 781 | |||
| 782 | /* Don't be recording funcs now */ | ||
| 783 | ftrace_record_suspend++; | ||
| 784 | save_ftrace_enabled = ftrace_enabled; | ||
| 785 | ftrace_enabled = 0; | ||
| 786 | 624 | ||
| 787 | start = ftrace_now(raw_smp_processor_id()); | 625 | start = ftrace_now(raw_smp_processor_id()); |
| 788 | ftrace_update_cnt = 0; | 626 | ftrace_update_cnt = 0; |
| 789 | 627 | ||
| 790 | /* No locks needed, the machine is stopped! */ | 628 | list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { |
| 791 | for (i = 0; i < FTRACE_HASHSIZE; i++) { | ||
| 792 | INIT_HLIST_HEAD(&temp_list); | ||
| 793 | head = &ftrace_hash[i]; | ||
| 794 | |||
| 795 | /* all CPUS are stopped, we are safe to modify code */ | ||
| 796 | hlist_for_each_entry_safe(p, t, n, head, node) { | ||
| 797 | /* Skip over failed records which have not been | ||
| 798 | * freed. */ | ||
| 799 | if (p->flags & FTRACE_FL_FAILED) | ||
| 800 | continue; | ||
| 801 | |||
| 802 | /* Unconverted records are always at the head of the | ||
| 803 | * hash bucket. Once we encounter a converted record, | ||
| 804 | * simply skip over to the next bucket. Saves ftraced | ||
| 805 | * some processor cycles (ftrace does its bit for | ||
| 806 | * global warming :-p ). */ | ||
| 807 | if (p->flags & (FTRACE_FL_CONVERTED)) | ||
| 808 | break; | ||
| 809 | 629 | ||
| 810 | /* Ignore updates to this record's mcount site. | 630 | /* If something went wrong, bail without enabling anything */ |
| 811 | * Reintroduce this record at the head of this | 631 | if (unlikely(ftrace_disabled)) |
| 812 | * bucket to attempt to "convert" it again if | 632 | return -1; |
| 813 | * the kprobe on it is unregistered before the | ||
| 814 | * next run. */ | ||
| 815 | if (get_kprobe((void *)p->ip)) { | ||
| 816 | ftrace_del_hash(p); | ||
| 817 | INIT_HLIST_NODE(&p->node); | ||
| 818 | hlist_add_head(&p->node, &temp_list); | ||
| 819 | freeze_record(p); | ||
| 820 | continue; | ||
| 821 | } else { | ||
| 822 | unfreeze_record(p); | ||
| 823 | } | ||
| 824 | 633 | ||
| 825 | /* convert record (i.e., patch mcount-call with NOP) */ | 634 | list_del_init(&p->list); |
| 826 | if (ftrace_code_disable(p)) { | ||
| 827 | p->flags |= FTRACE_FL_CONVERTED; | ||
| 828 | ftrace_update_cnt++; | ||
| 829 | } else { | ||
| 830 | if ((system_state == SYSTEM_BOOTING) || | ||
| 831 | !core_kernel_text(p->ip)) { | ||
| 832 | ftrace_del_hash(p); | ||
| 833 | ftrace_free_rec(p); | ||
| 834 | } | ||
| 835 | } | ||
| 836 | } | ||
| 837 | 635 | ||
| 838 | hlist_for_each_entry_safe(p, t, n, &temp_list, node) { | 636 | /* convert record (i.e., patch mcount-call with NOP) */ |
| 839 | hlist_del(&p->node); | 637 | if (ftrace_code_disable(p)) { |
| 840 | INIT_HLIST_NODE(&p->node); | 638 | p->flags |= FTRACE_FL_CONVERTED; |
| 841 | hlist_add_head(&p->node, head); | 639 | ftrace_update_cnt++; |
| 842 | } | 640 | } else |
| 641 | ftrace_free_rec(p); | ||
| 843 | } | 642 | } |
| 844 | 643 | ||
| 845 | stop = ftrace_now(raw_smp_processor_id()); | 644 | stop = ftrace_now(raw_smp_processor_id()); |
| 846 | ftrace_update_time = stop - start; | 645 | ftrace_update_time = stop - start; |
| 847 | ftrace_update_tot_cnt += ftrace_update_cnt; | 646 | ftrace_update_tot_cnt += ftrace_update_cnt; |
| 848 | ftraced_trigger = 0; | ||
| 849 | |||
| 850 | ftrace_enabled = save_ftrace_enabled; | ||
| 851 | ftrace_record_suspend--; | ||
| 852 | 647 | ||
| 853 | return 0; | 648 | return 0; |
| 854 | } | 649 | } |
| 855 | 650 | ||
| 856 | static int ftrace_update_code(void) | ||
| 857 | { | ||
| 858 | if (unlikely(ftrace_disabled) || | ||
| 859 | !ftrace_enabled || !ftraced_trigger) | ||
| 860 | return 0; | ||
| 861 | |||
| 862 | stop_machine(__ftrace_update_code, NULL, NULL); | ||
| 863 | |||
| 864 | return 1; | ||
| 865 | } | ||
| 866 | |||
| 867 | static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | 651 | static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) |
| 868 | { | 652 | { |
| 869 | struct ftrace_page *pg; | 653 | struct ftrace_page *pg; |
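Editor's note on the hunk above: the rewritten ftrace_update_code() drains a plain list of freshly recorded call sites (ftrace_new_addrs) instead of rescanning hash buckets and freezing records around kprobes. The property it leans on is the `_safe` list-walk idiom, which caches the successor before the loop body runs, so the current node can be unlinked or freed mid-walk. A minimal kernel-style sketch of that idiom; the struct and function names below are illustrative, not the kernel's:

```c
#include <linux/list.h>

struct rec {
	struct list_head list;
	unsigned long ip;	/* call-site address, as in dyn_ftrace */
};

static LIST_HEAD(pending);	/* stand-in for ftrace_new_addrs */

/* list_for_each_entry_safe() saves the next node in 't' before the body
 * runs, so list_del_init() on the current node cannot break the walk --
 * exactly the property the rewritten ftrace_update_code() relies on. */
static void drain_pending(void)
{
	struct rec *p, *t;

	list_for_each_entry_safe(p, t, &pending, list) {
		list_del_init(&p->list);
		/* ... patch the mcount call at p->ip, or free the
		 * record if patching fails, as in the hunk above ... */
	}
}
```

The old stop_machine()-based __ftrace_update_code() needed no locking because every CPU was halted; the list version instead relies on ftrace_start_lock plus running with interrupts off during the boot-time conversion.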
| @@ -892,7 +676,7 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) | |||
| 892 | pg = ftrace_pages = ftrace_pages_start; | 676 | pg = ftrace_pages = ftrace_pages_start; |
| 893 | 677 | ||
| 894 | cnt = num_to_init / ENTRIES_PER_PAGE; | 678 | cnt = num_to_init / ENTRIES_PER_PAGE; |
| 895 | pr_info("ftrace: allocating %ld hash entries in %d pages\n", | 679 | pr_info("ftrace: allocating %ld entries in %d pages\n", |
| 896 | num_to_init, cnt); | 680 | num_to_init, cnt); |
| 897 | 681 | ||
| 898 | for (i = 0; i < cnt; i++) { | 682 | for (i = 0; i < cnt; i++) { |
| @@ -1401,10 +1185,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
| 1401 | } | 1185 | } |
| 1402 | 1186 | ||
| 1403 | mutex_lock(&ftrace_sysctl_lock); | 1187 | mutex_lock(&ftrace_sysctl_lock); |
| 1404 | mutex_lock(&ftraced_lock); | 1188 | mutex_lock(&ftrace_start_lock); |
| 1405 | if (iter->filtered && ftraced_suspend && ftrace_enabled) | 1189 | if (iter->filtered && ftrace_start && ftrace_enabled) |
| 1406 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1190 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
| 1407 | mutex_unlock(&ftraced_lock); | 1191 | mutex_unlock(&ftrace_start_lock); |
| 1408 | mutex_unlock(&ftrace_sysctl_lock); | 1192 | mutex_unlock(&ftrace_sysctl_lock); |
| 1409 | 1193 | ||
| 1410 | kfree(iter); | 1194 | kfree(iter); |
| @@ -1424,55 +1208,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file) | |||
| 1424 | return ftrace_regex_release(inode, file, 0); | 1208 | return ftrace_regex_release(inode, file, 0); |
| 1425 | } | 1209 | } |
| 1426 | 1210 | ||
| 1427 | static ssize_t | ||
| 1428 | ftraced_read(struct file *filp, char __user *ubuf, | ||
| 1429 | size_t cnt, loff_t *ppos) | ||
| 1430 | { | ||
| 1431 | /* don't worry about races */ | ||
| 1432 | char *buf = ftraced_stop ? "disabled\n" : "enabled\n"; | ||
| 1433 | int r = strlen(buf); | ||
| 1434 | |||
| 1435 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
| 1436 | } | ||
| 1437 | |||
| 1438 | static ssize_t | ||
| 1439 | ftraced_write(struct file *filp, const char __user *ubuf, | ||
| 1440 | size_t cnt, loff_t *ppos) | ||
| 1441 | { | ||
| 1442 | char buf[64]; | ||
| 1443 | long val; | ||
| 1444 | int ret; | ||
| 1445 | |||
| 1446 | if (cnt >= sizeof(buf)) | ||
| 1447 | return -EINVAL; | ||
| 1448 | |||
| 1449 | if (copy_from_user(&buf, ubuf, cnt)) | ||
| 1450 | return -EFAULT; | ||
| 1451 | |||
| 1452 | if (strncmp(buf, "enable", 6) == 0) | ||
| 1453 | val = 1; | ||
| 1454 | else if (strncmp(buf, "disable", 7) == 0) | ||
| 1455 | val = 0; | ||
| 1456 | else { | ||
| 1457 | buf[cnt] = 0; | ||
| 1458 | |||
| 1459 | ret = strict_strtoul(buf, 10, &val); | ||
| 1460 | if (ret < 0) | ||
| 1461 | return ret; | ||
| 1462 | |||
| 1463 | val = !!val; | ||
| 1464 | } | ||
| 1465 | |||
| 1466 | if (val) | ||
| 1467 | ftrace_enable_daemon(); | ||
| 1468 | else | ||
| 1469 | ftrace_disable_daemon(); | ||
| 1470 | |||
| 1471 | filp->f_pos += cnt; | ||
| 1472 | |||
| 1473 | return cnt; | ||
| 1474 | } | ||
| 1475 | |||
| 1476 | static struct file_operations ftrace_avail_fops = { | 1211 | static struct file_operations ftrace_avail_fops = { |
| 1477 | .open = ftrace_avail_open, | 1212 | .open = ftrace_avail_open, |
| 1478 | .read = seq_read, | 1213 | .read = seq_read, |
| @@ -1503,54 +1238,6 @@ static struct file_operations ftrace_notrace_fops = { | |||
| 1503 | .release = ftrace_notrace_release, | 1238 | .release = ftrace_notrace_release, |
| 1504 | }; | 1239 | }; |
| 1505 | 1240 | ||
| 1506 | static struct file_operations ftraced_fops = { | ||
| 1507 | .open = tracing_open_generic, | ||
| 1508 | .read = ftraced_read, | ||
| 1509 | .write = ftraced_write, | ||
| 1510 | }; | ||
| 1511 | |||
| 1512 | /** | ||
| 1513 | * ftrace_force_update - force an update to all recording ftrace functions | ||
| 1514 | */ | ||
| 1515 | int ftrace_force_update(void) | ||
| 1516 | { | ||
| 1517 | int ret = 0; | ||
| 1518 | |||
| 1519 | if (unlikely(ftrace_disabled)) | ||
| 1520 | return -ENODEV; | ||
| 1521 | |||
| 1522 | mutex_lock(&ftrace_sysctl_lock); | ||
| 1523 | mutex_lock(&ftraced_lock); | ||
| 1524 | |||
| 1525 | /* | ||
| 1526 | * If ftraced_trigger is not set, then there is nothing | ||
| 1527 | * to update. | ||
| 1528 | */ | ||
| 1529 | if (ftraced_trigger && !ftrace_update_code()) | ||
| 1530 | ret = -EBUSY; | ||
| 1531 | |||
| 1532 | mutex_unlock(&ftraced_lock); | ||
| 1533 | mutex_unlock(&ftrace_sysctl_lock); | ||
| 1534 | |||
| 1535 | return ret; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | static void ftrace_force_shutdown(void) | ||
| 1539 | { | ||
| 1540 | struct task_struct *task; | ||
| 1541 | int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC; | ||
| 1542 | |||
| 1543 | mutex_lock(&ftraced_lock); | ||
| 1544 | task = ftraced_task; | ||
| 1545 | ftraced_task = NULL; | ||
| 1546 | ftraced_suspend = -1; | ||
| 1547 | ftrace_run_update_code(command); | ||
| 1548 | mutex_unlock(&ftraced_lock); | ||
| 1549 | |||
| 1550 | if (task) | ||
| 1551 | kthread_stop(task); | ||
| 1552 | } | ||
| 1553 | |||
| 1554 | static __init int ftrace_init_debugfs(void) | 1241 | static __init int ftrace_init_debugfs(void) |
| 1555 | { | 1242 | { |
| 1556 | struct dentry *d_tracer; | 1243 | struct dentry *d_tracer; |
| @@ -1581,17 +1268,11 @@ static __init int ftrace_init_debugfs(void) | |||
| 1581 | pr_warning("Could not create debugfs " | 1268 | pr_warning("Could not create debugfs " |
| 1582 | "'set_ftrace_notrace' entry\n"); | 1269 | "'set_ftrace_notrace' entry\n"); |
| 1583 | 1270 | ||
| 1584 | entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer, | ||
| 1585 | NULL, &ftraced_fops); | ||
| 1586 | if (!entry) | ||
| 1587 | pr_warning("Could not create debugfs " | ||
| 1588 | "'ftraced_enabled' entry\n"); | ||
| 1589 | return 0; | 1271 | return 0; |
| 1590 | } | 1272 | } |
| 1591 | 1273 | ||
| 1592 | fs_initcall(ftrace_init_debugfs); | 1274 | fs_initcall(ftrace_init_debugfs); |
| 1593 | 1275 | ||
| 1594 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | ||
| 1595 | static int ftrace_convert_nops(unsigned long *start, | 1276 | static int ftrace_convert_nops(unsigned long *start, |
| 1596 | unsigned long *end) | 1277 | unsigned long *end) |
| 1597 | { | 1278 | { |
| @@ -1599,20 +1280,18 @@ static int ftrace_convert_nops(unsigned long *start, | |||
| 1599 | unsigned long addr; | 1280 | unsigned long addr; |
| 1600 | unsigned long flags; | 1281 | unsigned long flags; |
| 1601 | 1282 | ||
| 1283 | mutex_lock(&ftrace_start_lock); | ||
| 1602 | p = start; | 1284 | p = start; |
| 1603 | while (p < end) { | 1285 | while (p < end) { |
| 1604 | addr = ftrace_call_adjust(*p++); | 1286 | addr = ftrace_call_adjust(*p++); |
| 1605 | /* should not be called from interrupt context */ | ||
| 1606 | spin_lock(&ftrace_lock); | ||
| 1607 | ftrace_record_ip(addr); | 1287 | ftrace_record_ip(addr); |
| 1608 | spin_unlock(&ftrace_lock); | ||
| 1609 | ftrace_shutdown_replenish(); | ||
| 1610 | } | 1288 | } |
| 1611 | 1289 | ||
| 1612 | /* p is ignored */ | 1290 | /* disable interrupts to prevent kstop_machine */ |
| 1613 | local_irq_save(flags); | 1291 | local_irq_save(flags); |
| 1614 | __ftrace_update_code(p); | 1292 | ftrace_update_code(); |
| 1615 | local_irq_restore(flags); | 1293 | local_irq_restore(flags); |
| 1294 | mutex_unlock(&ftrace_start_lock); | ||
| 1616 | 1295 | ||
| 1617 | return 0; | 1296 | return 0; |
| 1618 | } | 1297 | } |
| @@ -1658,130 +1337,26 @@ void __init ftrace_init(void) | |||
| 1658 | failed: | 1337 | failed: |
| 1659 | ftrace_disabled = 1; | 1338 | ftrace_disabled = 1; |
| 1660 | } | 1339 | } |
| 1661 | #else /* CONFIG_FTRACE_MCOUNT_RECORD */ | ||
| 1662 | static int ftraced(void *ignore) | ||
| 1663 | { | ||
| 1664 | unsigned long usecs; | ||
| 1665 | |||
| 1666 | while (!kthread_should_stop()) { | ||
| 1667 | |||
| 1668 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1669 | |||
| 1670 | /* check once a second */ | ||
| 1671 | schedule_timeout(HZ); | ||
| 1672 | |||
| 1673 | if (unlikely(ftrace_disabled)) | ||
| 1674 | continue; | ||
| 1675 | |||
| 1676 | mutex_lock(&ftrace_sysctl_lock); | ||
| 1677 | mutex_lock(&ftraced_lock); | ||
| 1678 | if (!ftraced_suspend && !ftraced_stop && | ||
| 1679 | ftrace_update_code()) { | ||
| 1680 | usecs = nsecs_to_usecs(ftrace_update_time); | ||
| 1681 | if (ftrace_update_tot_cnt > 100000) { | ||
| 1682 | ftrace_update_tot_cnt = 0; | ||
| 1683 | pr_info("hm, dftrace overflow: %lu change%s" | ||
| 1684 | " (%lu total) in %lu usec%s\n", | ||
| 1685 | ftrace_update_cnt, | ||
| 1686 | ftrace_update_cnt != 1 ? "s" : "", | ||
| 1687 | ftrace_update_tot_cnt, | ||
| 1688 | usecs, usecs != 1 ? "s" : ""); | ||
| 1689 | ftrace_disabled = 1; | ||
| 1690 | WARN_ON_ONCE(1); | ||
| 1691 | } | ||
| 1692 | } | ||
| 1693 | mutex_unlock(&ftraced_lock); | ||
| 1694 | mutex_unlock(&ftrace_sysctl_lock); | ||
| 1695 | |||
| 1696 | ftrace_shutdown_replenish(); | ||
| 1697 | } | ||
| 1698 | __set_current_state(TASK_RUNNING); | ||
| 1699 | return 0; | ||
| 1700 | } | ||
| 1701 | |||
| 1702 | static int __init ftrace_dynamic_init(void) | ||
| 1703 | { | ||
| 1704 | struct task_struct *p; | ||
| 1705 | unsigned long addr; | ||
| 1706 | int ret; | ||
| 1707 | |||
| 1708 | addr = (unsigned long)ftrace_record_ip; | ||
| 1709 | |||
| 1710 | stop_machine(ftrace_dyn_arch_init, &addr, NULL); | ||
| 1711 | |||
| 1712 | /* ftrace_dyn_arch_init places the return code in addr */ | ||
| 1713 | if (addr) { | ||
| 1714 | ret = (int)addr; | ||
| 1715 | goto failed; | ||
| 1716 | } | ||
| 1717 | |||
| 1718 | ret = ftrace_dyn_table_alloc(NR_TO_INIT); | ||
| 1719 | if (ret) | ||
| 1720 | goto failed; | ||
| 1721 | |||
| 1722 | p = kthread_run(ftraced, NULL, "ftraced"); | ||
| 1723 | if (IS_ERR(p)) { | ||
| 1724 | ret = -1; | ||
| 1725 | goto failed; | ||
| 1726 | } | ||
| 1727 | |||
| 1728 | last_ftrace_enabled = ftrace_enabled = 1; | ||
| 1729 | ftraced_task = p; | ||
| 1730 | |||
| 1731 | return 0; | ||
| 1732 | |||
| 1733 | failed: | ||
| 1734 | ftrace_disabled = 1; | ||
| 1735 | return ret; | ||
| 1736 | } | ||
| 1737 | |||
| 1738 | core_initcall(ftrace_dynamic_init); | ||
| 1739 | #endif /* CONFIG_FTRACE_MCOUNT_RECORD */ | ||
| 1740 | 1340 | ||
| 1741 | #else | 1341 | #else |
| 1742 | # define ftrace_startup() do { } while (0) | 1342 | # define ftrace_startup() do { } while (0) |
| 1743 | # define ftrace_shutdown() do { } while (0) | 1343 | # define ftrace_shutdown() do { } while (0) |
| 1744 | # define ftrace_startup_sysctl() do { } while (0) | 1344 | # define ftrace_startup_sysctl() do { } while (0) |
| 1745 | # define ftrace_shutdown_sysctl() do { } while (0) | 1345 | # define ftrace_shutdown_sysctl() do { } while (0) |
| 1746 | # define ftrace_force_shutdown() do { } while (0) | ||
| 1747 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1346 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 1748 | 1347 | ||
| 1749 | /** | 1348 | /** |
| 1750 | * ftrace_kill_atomic - kill ftrace from critical sections | 1349 | * ftrace_kill - kill ftrace |
| 1751 | * | 1350 | * |
| 1752 | * This function should be used by panic code. It stops ftrace | 1351 | * This function should be used by panic code. It stops ftrace |
| 1753 | * but in a not so nice way. If you need to simply kill ftrace | 1352 | * but in a not so nice way. It is now safe to call from |
| 1754 | * from a non-atomic section, use ftrace_kill. | 1353 | * atomic context, having absorbed ftrace_kill_atomic. |
| 1755 | */ | 1354 | */ |
| 1756 | void ftrace_kill_atomic(void) | ||
| 1757 | { | ||
| 1758 | ftrace_disabled = 1; | ||
| 1759 | ftrace_enabled = 0; | ||
| 1760 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 1761 | ftraced_suspend = -1; | ||
| 1762 | #endif | ||
| 1763 | clear_ftrace_function(); | ||
| 1764 | } | ||
| 1765 | |||
| 1766 | /** | ||
| 1767 | * ftrace_kill - totally shutdown ftrace | ||
| 1768 | * | ||
| 1769 | * This is a safety measure. If something was detected that seems | ||
| 1770 | * wrong, calling this function will keep ftrace from doing | ||
| 1771 | * any more modifications, and updates. | ||
| 1772 | * used when something went wrong. | ||
| 1773 | */ | ||
| 1774 | void ftrace_kill(void) | 1355 | void ftrace_kill(void) |
| 1775 | { | 1356 | { |
| 1776 | mutex_lock(&ftrace_sysctl_lock); | ||
| 1777 | ftrace_disabled = 1; | 1357 | ftrace_disabled = 1; |
| 1778 | ftrace_enabled = 0; | 1358 | ftrace_enabled = 0; |
| 1779 | |||
| 1780 | clear_ftrace_function(); | 1359 | clear_ftrace_function(); |
| 1781 | mutex_unlock(&ftrace_sysctl_lock); | ||
| 1782 | |||
| 1783 | /* Try to totally disable ftrace */ | ||
| 1784 | ftrace_force_shutdown(); | ||
| 1785 | } | 1360 | } |
| 1786 | 1361 | ||
| 1787 | /** | 1362 | /** |
| @@ -1870,3 +1445,4 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
| 1870 | mutex_unlock(&ftrace_sysctl_lock); | 1445 | mutex_unlock(&ftrace_sysctl_lock); |
| 1871 | return ret; | 1446 | return ret; |
| 1872 | } | 1447 | } |
| 1448 | |||
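Editor's note: the bulk deleted from ftrace.c above is the ftraced kthread, its force-update/force-shutdown paths, and the ftraced_enabled debugfs knob. They are obsolete because, with CONFIG_FTRACE_MCOUNT_RECORD, recordmcount collects every mcount call site into the __mcount_loc section at build time, so a single boot-time pass replaces the once-a-second daemon. A condensed, hedged sketch of how that section is consumed (the section symbols match the kernel's; the function name and body are simplified):

```c
/* Linker-provided bounds of the __mcount_loc section, filled at build
 * time with the address of every mcount call site in the image. */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_boot_convert(void)	/* illustrative name */
{
	/* One pass at boot: record each site, then patch it to a NOP.
	 * No daemon, no periodic hash scan, no stop_machine(). */
	ftrace_convert_nops(__start_mcount_loc, __stop_mcount_loc);
}
```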
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 94af1fe56bb4..cedf4e268285 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -130,7 +130,7 @@ struct buffer_page { | |||
| 130 | static inline void free_buffer_page(struct buffer_page *bpage) | 130 | static inline void free_buffer_page(struct buffer_page *bpage) |
| 131 | { | 131 | { |
| 132 | if (bpage->page) | 132 | if (bpage->page) |
| 133 | __free_page(bpage->page); | 133 | free_page((unsigned long)bpage->page); |
| 134 | kfree(bpage); | 134 | kfree(bpage); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| @@ -966,7 +966,9 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 966 | if (unlikely(*delta > (1ULL << 59) && !once++)) { | 966 | if (unlikely(*delta > (1ULL << 59) && !once++)) { |
| 967 | printk(KERN_WARNING "Delta way too big! %llu" | 967 | printk(KERN_WARNING "Delta way too big! %llu" |
| 968 | " ts=%llu write stamp = %llu\n", | 968 | " ts=%llu write stamp = %llu\n", |
| 969 | *delta, *ts, cpu_buffer->write_stamp); | 969 | (unsigned long long)*delta, |
| 970 | (unsigned long long)*ts, | ||
| 971 | (unsigned long long)cpu_buffer->write_stamp); | ||
| 970 | WARN_ON(1); | 972 | WARN_ON(1); |
| 971 | } | 973 | } |
| 972 | 974 | ||
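Editor's note on the ring-buffer hunks: bpage->page holds the page's virtual address, so it must be released with free_page(), which takes an address, rather than __free_page(), which takes a struct page pointer; the second hunk casts the cycle_t values to unsigned long long so the %llu format is portable across architectures. A short sketch of the two correct allocator pairings (standard kernel API; the wrapper function name is ours):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

static void page_pairing_demo(void)
{
	unsigned long addr;
	struct page *pg;

	/* Address-based pairing -- what free_buffer_page() needs,
	 * since it stores a void * virtual address. */
	addr = __get_free_page(GFP_KERNEL);
	if (addr)
		free_page(addr);

	/* struct-page-based pairing -- the only one that may
	 * legitimately use __free_page(). */
	pg = alloc_page(GFP_KERNEL);
	if (pg)
		__free_page(pg);
}
```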
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d345d649d073..a610ca771558 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | #include <linux/stacktrace.h> | 35 | #include <linux/stacktrace.h> |
| 36 | #include <linux/ring_buffer.h> | 36 | #include <linux/ring_buffer.h> |
| 37 | #include <linux/irqflags.h> | ||
| 37 | 38 | ||
| 38 | #include "trace.h" | 39 | #include "trace.h" |
| 39 | 40 | ||
| @@ -851,7 +852,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
| 851 | preempt_enable_notrace(); | 852 | preempt_enable_notrace(); |
| 852 | } | 853 | } |
| 853 | 854 | ||
| 854 | #ifdef CONFIG_FTRACE | 855 | #ifdef CONFIG_FUNCTION_TRACER |
| 855 | static void | 856 | static void |
| 856 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 857 | function_trace_call(unsigned long ip, unsigned long parent_ip) |
| 857 | { | 858 | { |
| @@ -865,9 +866,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
| 865 | if (unlikely(!ftrace_function_enabled)) | 866 | if (unlikely(!ftrace_function_enabled)) |
| 866 | return; | 867 | return; |
| 867 | 868 | ||
| 868 | if (skip_trace(ip)) | ||
| 869 | return; | ||
| 870 | |||
| 871 | pc = preempt_count(); | 869 | pc = preempt_count(); |
| 872 | resched = need_resched(); | 870 | resched = need_resched(); |
| 873 | preempt_disable_notrace(); | 871 | preempt_disable_notrace(); |
| @@ -2379,9 +2377,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
| 2379 | int i; | 2377 | int i; |
| 2380 | size_t ret; | 2378 | size_t ret; |
| 2381 | 2379 | ||
| 2380 | ret = cnt; | ||
| 2381 | |||
| 2382 | if (cnt > max_tracer_type_len) | 2382 | if (cnt > max_tracer_type_len) |
| 2383 | cnt = max_tracer_type_len; | 2383 | cnt = max_tracer_type_len; |
| 2384 | ret = cnt; | ||
| 2385 | 2384 | ||
| 2386 | if (copy_from_user(&buf, ubuf, cnt)) | 2385 | if (copy_from_user(&buf, ubuf, cnt)) |
| 2387 | return -EFAULT; | 2386 | return -EFAULT; |
| @@ -2414,8 +2413,8 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
| 2414 | out: | 2413 | out: |
| 2415 | mutex_unlock(&trace_types_lock); | 2414 | mutex_unlock(&trace_types_lock); |
| 2416 | 2415 | ||
| 2417 | if (ret == cnt) | 2416 | if (ret > 0) |
| 2418 | filp->f_pos += cnt; | 2417 | filp->f_pos += ret; |
| 2419 | 2418 | ||
| 2420 | return ret; | 2419 | return ret; |
| 2421 | } | 2420 | } |
| @@ -3097,7 +3096,7 @@ void ftrace_dump(void) | |||
| 3097 | dump_ran = 1; | 3096 | dump_ran = 1; |
| 3098 | 3097 | ||
| 3099 | /* No turning back! */ | 3098 | /* No turning back! */ |
| 3100 | ftrace_kill_atomic(); | 3099 | ftrace_kill(); |
| 3101 | 3100 | ||
| 3102 | for_each_tracing_cpu(cpu) { | 3101 | for_each_tracing_cpu(cpu) { |
| 3103 | atomic_inc(&global_trace.data[cpu]->disabled); | 3102 | atomic_inc(&global_trace.data[cpu]->disabled); |
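Editor's note on the tracing_set_trace_write() hunks: `ret` is now snapshotted from `cnt` before the length clamp, and the file position advances by that full count. Otherwise a write longer than max_tracer_type_len would report a short write, and a well-behaved caller would loop forever resubmitting the tail. A hedged sketch of the resulting ->write pattern; MAX_LEN and the function name are illustrative:

```c
#include <linux/fs.h>
#include <linux/uaccess.h>

#define MAX_LEN 64	/* illustrative stand-in for max_tracer_type_len */

static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	char buf[MAX_LEN + 1];
	ssize_t ret = cnt;	/* report the full write as consumed */

	if (cnt > MAX_LEN)
		cnt = MAX_LEN;	/* but only copy what fits */

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	/* ... act on buf ... */

	if (ret > 0)
		filp->f_pos += ret;	/* as in the diff above */
	return ret;
}
```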
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f1f99572cde7..6889ca48f1f1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -335,7 +335,7 @@ void update_max_tr_single(struct trace_array *tr, | |||
| 335 | 335 | ||
| 336 | extern cycle_t ftrace_now(int cpu); | 336 | extern cycle_t ftrace_now(int cpu); |
| 337 | 337 | ||
| 338 | #ifdef CONFIG_FTRACE | 338 | #ifdef CONFIG_FUNCTION_TRACER |
| 339 | void tracing_start_function_trace(void); | 339 | void tracing_start_function_trace(void); |
| 340 | void tracing_stop_function_trace(void); | 340 | void tracing_stop_function_trace(void); |
| 341 | #else | 341 | #else |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index e90eb0c2c56c..0f85a64003d3 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -64,7 +64,7 @@ static void function_trace_ctrl_update(struct trace_array *tr) | |||
| 64 | 64 | ||
| 65 | static struct tracer function_trace __read_mostly = | 65 | static struct tracer function_trace __read_mostly = |
| 66 | { | 66 | { |
| 67 | .name = "ftrace", | 67 | .name = "function", |
| 68 | .init = function_trace_init, | 68 | .init = function_trace_init, |
| 69 | .reset = function_trace_reset, | 69 | .reset = function_trace_reset, |
| 70 | .ctrl_update = function_trace_ctrl_update, | 70 | .ctrl_update = function_trace_ctrl_update, |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a7db7f040ae0..9c74071c10e0 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -63,7 +63,7 @@ irq_trace(void) | |||
| 63 | */ | 63 | */ |
| 64 | static __cacheline_aligned_in_smp unsigned long max_sequence; | 64 | static __cacheline_aligned_in_smp unsigned long max_sequence; |
| 65 | 65 | ||
| 66 | #ifdef CONFIG_FTRACE | 66 | #ifdef CONFIG_FUNCTION_TRACER |
| 67 | /* | 67 | /* |
| 68 | * irqsoff uses its own tracer function to keep the overhead down: | 68 | * irqsoff uses its own tracer function to keep the overhead down: |
| 69 | */ | 69 | */ |
| @@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 104 | { | 104 | { |
| 105 | .func = irqsoff_tracer_call, | 105 | .func = irqsoff_tracer_call, |
| 106 | }; | 106 | }; |
| 107 | #endif /* CONFIG_FTRACE */ | 107 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 108 | 108 | ||
| 109 | /* | 109 | /* |
| 110 | * Should this new latency be reported/recorded? | 110 | * Should this new latency be reported/recorded? |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index fe4a252c2363..3ae93f16b565 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock = | |||
| 31 | 31 | ||
| 32 | static void __wakeup_reset(struct trace_array *tr); | 32 | static void __wakeup_reset(struct trace_array *tr); |
| 33 | 33 | ||
| 34 | #ifdef CONFIG_FTRACE | 34 | #ifdef CONFIG_FUNCTION_TRACER |
| 35 | /* | 35 | /* |
| 36 | * irqsoff uses its own tracer function to keep the overhead down: | 36 | * irqsoff uses its own tracer function to keep the overhead down: |
| 37 | */ | 37 | */ |
| @@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
| 96 | { | 96 | { |
| 97 | .func = wakeup_tracer_call, | 97 | .func = wakeup_tracer_call, |
| 98 | }; | 98 | }; |
| 99 | #endif /* CONFIG_FTRACE */ | 99 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 100 | 100 | ||
| 101 | /* | 101 | /* |
| 102 | * Should this new latency be reported/recorded? | 102 | * Should this new latency be reported/recorded? |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 09cf230d7eca..90bc752a7580 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
| 70 | return ret; | 70 | return ret; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | #ifdef CONFIG_FTRACE | 73 | #ifdef CONFIG_FUNCTION_TRACER |
| 74 | 74 | ||
| 75 | #ifdef CONFIG_DYNAMIC_FTRACE | 75 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 76 | 76 | ||
| @@ -99,13 +99,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
| 99 | /* passed in by parameter to fool gcc from optimizing */ | 99 | /* passed in by parameter to fool gcc from optimizing */ |
| 100 | func(); | 100 | func(); |
| 101 | 101 | ||
| 102 | /* update the records */ | ||
| 103 | ret = ftrace_force_update(); | ||
| 104 | if (ret) { | ||
| 105 | printk(KERN_CONT ".. ftraced failed .. "); | ||
| 106 | return ret; | ||
| 107 | } | ||
| 108 | |||
| 109 | /* | 102 | /* |
| 110 | * Some archs *cough*PowerPC*cough* add characters to the | 103 |
| 111 | * start of the function names. We simply put a '*' to | 104 | * start of the function names. We simply put a '*' to |
| @@ -183,13 +176,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
| 183 | /* make sure msleep has been recorded */ | 176 | /* make sure msleep has been recorded */ |
| 184 | msleep(1); | 177 | msleep(1); |
| 185 | 178 | ||
| 186 | /* force the recorded functions to be traced */ | ||
| 187 | ret = ftrace_force_update(); | ||
| 188 | if (ret) { | ||
| 189 | printk(KERN_CONT ".. ftraced failed .. "); | ||
| 190 | return ret; | ||
| 191 | } | ||
| 192 | |||
| 193 | /* start the tracing */ | 179 | /* start the tracing */ |
| 194 | ftrace_enabled = 1; | 180 | ftrace_enabled = 1; |
| 195 | tracer_enabled = 1; | 181 | tracer_enabled = 1; |
| @@ -226,7 +212,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
| 226 | 212 | ||
| 227 | return ret; | 213 | return ret; |
| 228 | } | 214 | } |
| 229 | #endif /* CONFIG_FTRACE */ | 215 | #endif /* CONFIG_FUNCTION_TRACER */ |
| 230 | 216 | ||
| 231 | #ifdef CONFIG_IRQSOFF_TRACER | 217 | #ifdef CONFIG_IRQSOFF_TRACER |
| 232 | int | 218 | int |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 74c5d9a3afae..be682b62fe58 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -44,6 +44,10 @@ static inline void check_stack(void) | |||
| 44 | if (this_size <= max_stack_size) | 44 | if (this_size <= max_stack_size) |
| 45 | return; | 45 | return; |
| 46 | 46 | ||
| 47 | /* we do not handle interrupt stacks yet */ | ||
| 48 | if (!object_is_on_stack(&this_size)) | ||
| 49 | return; | ||
| 50 | |||
| 47 | raw_local_irq_save(flags); | 51 | raw_local_irq_save(flags); |
| 48 | __raw_spin_lock(&max_stack_lock); | 52 | __raw_spin_lock(&max_stack_lock); |
| 49 | 53 | ||
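Editor's note on the trace_stack hunk: check_stack() samples the address of a local (&this_size) to measure stack depth; if the CPU is currently on an interrupt stack, that address lies outside the task's stack region and the depth arithmetic would be garbage, so the tracer now bails out early. Roughly what the object_is_on_stack() helper tests (a sketch, assuming the era's task_stack_page()/THREAD_SIZE layout; the wrapper name is ours):

```c
#include <linux/sched.h>
#include <linux/thread_info.h>

static inline int on_task_stack(void *obj)
{
	void *stack = task_stack_page(current);

	/* The task stack is one THREAD_SIZE region; anything outside it
	 * (e.g. an IRQ-stack local) fails this test. */
	return obj >= stack && obj < stack + THREAD_SIZE;
}
```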
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index f2b7c28a4708..af8c85664882 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
| @@ -131,6 +131,9 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) | |||
| 131 | 131 | ||
| 132 | old = entry->funcs; | 132 | old = entry->funcs; |
| 133 | 133 | ||
| 134 | if (!old) | ||
| 135 | return NULL; | ||
| 136 | |||
| 134 | debug_print_probes(entry); | 137 | debug_print_probes(entry); |
| 135 | /* (N -> M), (N > 1, M >= 0) probes */ | 138 | /* (N -> M), (N > 1, M >= 0) probes */ |
| 136 | for (nr_probes = 0; old[nr_probes]; nr_probes++) { | 139 | for (nr_probes = 0; old[nr_probes]; nr_probes++) { |
| @@ -388,6 +391,11 @@ int tracepoint_probe_unregister(const char *name, void *probe) | |||
| 388 | if (entry->rcu_pending) | 391 | if (entry->rcu_pending) |
| 389 | rcu_barrier_sched(); | 392 | rcu_barrier_sched(); |
| 390 | old = tracepoint_entry_remove_probe(entry, probe); | 393 | old = tracepoint_entry_remove_probe(entry, probe); |
| 394 | if (!old) { | ||
| 395 | printk(KERN_WARNING "Warning: Trying to unregister a probe " | ||
| 396 | "that doesn't exist\n"); | ||
| 397 | goto end; | ||
| 398 | } | ||
| 391 | mutex_unlock(&tracepoints_mutex); | 399 | mutex_unlock(&tracepoints_mutex); |
| 392 | tracepoint_update_probes(); /* may update entry */ | 400 | tracepoint_update_probes(); /* may update entry */ |
| 393 | mutex_lock(&tracepoints_mutex); | 401 | mutex_lock(&tracepoints_mutex); |
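Editor's note on the tracepoint hunks: tracepoint_entry_remove_probe() can now encounter an entry whose funcs array is NULL (no probe attached), so it returns NULL instead of dereferencing it, and the unregister path warns and unwinds via `goto end`. A hedged usage sketch of the case being guarded, written against this era's tracepoint API (the event name and probe are made up):

```c
#include <linux/tracepoint.h>

static void my_probe(void)	/* illustrative; real probe signatures are event-specific */
{
}

static void unregister_demo(void)
{
	/* Unregistering a probe that is not attached: previously this
	 * could walk a NULL entry->funcs array; with the hunks above it
	 * logs a warning and returns safely instead. */
	tracepoint_probe_unregister("made_up_event", (void *)my_probe);
}
```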
