Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c                   |   1
-rw-r--r--  kernel/hrtimer.c                 | 110
-rw-r--r--  kernel/kprobes.c                 |   6
-rw-r--r--  kernel/perf_counter.c            |   1
-rw-r--r--  kernel/pid.c                     |   7
-rw-r--r--  kernel/power/user.c              |   1
-rw-r--r--  kernel/rcutree.c                 |   3
-rw-r--r--  kernel/sched.c                   |  57
-rw-r--r--  kernel/sched_fair.c              |   3
-rw-r--r--  kernel/sched_rt.c                |  18
-rw-r--r--  kernel/time/clockevents.c        |  11
-rw-r--r--  kernel/trace/Kconfig             |   6
-rw-r--r--  kernel/trace/blktrace.c          |   1
-rw-r--r--  kernel/trace/ftrace.c            |   9
-rw-r--r--  kernel/trace/trace.c             |   1
-rw-r--r--  kernel/trace/trace_event_types.h |   3
-rw-r--r--  kernel/trace/trace_functions.c   |   2
-rw-r--r--  kernel/trace/trace_output.c      |   3
-rw-r--r--  kernel/trace/trace_stack.c       |   4
19 files changed, 145 insertions, 102 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 794c862125fe..0672ff88f159 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -247,6 +247,7 @@ again:
         if (err < 0)
                 return err;
 
+        page = compound_head(page);
         lock_page(page);
         if (!page->mapping) {
                 unlock_page(page);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 9002958a96e7..49da79ab8486 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -191,6 +191,46 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
         }
 }
 
+
+/*
+ * Get the preferred target CPU for NOHZ
+ */
+static int hrtimer_get_target(int this_cpu, int pinned)
+{
+#ifdef CONFIG_NO_HZ
+        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
+                int preferred_cpu = get_nohz_load_balancer();
+
+                if (preferred_cpu >= 0)
+                        return preferred_cpu;
+        }
+#endif
+        return this_cpu;
+}
+
+/*
+ * With HIGHRES=y we do not migrate the timer when it is expiring
+ * before the next event on the target cpu because we cannot reprogram
+ * the target cpu hardware and we would cause it to fire late.
+ *
+ * Called with cpu_base->lock of target cpu held.
+ */
+static int
+hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+        ktime_t expires;
+
+        if (!new_base->cpu_base->hres_active)
+                return 0;
+
+        expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+        return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
+#else
+        return 0;
+#endif
+}
+
 /*
  * Switch the timer base to the current CPU when possible.
  */
@@ -200,16 +240,8 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 {
         struct hrtimer_clock_base *new_base;
         struct hrtimer_cpu_base *new_cpu_base;
-        int cpu, preferred_cpu = -1;
-
-        cpu = smp_processor_id();
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
-        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
-                preferred_cpu = get_nohz_load_balancer();
-                if (preferred_cpu >= 0)
-                        cpu = preferred_cpu;
-        }
-#endif
+        int this_cpu = smp_processor_id();
+        int cpu = hrtimer_get_target(this_cpu, pinned);
 
 again:
         new_cpu_base = &per_cpu(hrtimer_bases, cpu);
@@ -217,7 +249,7 @@ again:
 
         if (base != new_base) {
                 /*
-                 * We are trying to schedule the timer on the local CPU.
+                 * We are trying to move timer to new_base.
                  * However we can't change timer's base while it is running,
                  * so we keep it on the same CPU. No hassle vs. reprogramming
                  * the event source in the high resolution case. The softirq
@@ -233,38 +265,12 @@ again:
                 spin_unlock(&base->cpu_base->lock);
                 spin_lock(&new_base->cpu_base->lock);
 
-                /* Optimized away for NOHZ=n SMP=n */
-                if (cpu == preferred_cpu) {
-                        /* Calculate clock monotonic expiry time */
-#ifdef CONFIG_HIGH_RES_TIMERS
-                        ktime_t expires = ktime_sub(hrtimer_get_expires(timer),
-                                                    new_base->offset);
-#else
-                        ktime_t expires = hrtimer_get_expires(timer);
-#endif
-
-                        /*
-                         * Get the next event on target cpu from the
-                         * clock events layer.
-                         * This covers the highres=off nohz=on case as well.
-                         */
-                        ktime_t next = clockevents_get_next_event(cpu);
-
-                        ktime_t delta = ktime_sub(expires, next);
-
-                        /*
-                         * We do not migrate the timer when it is expiring
-                         * before the next event on the target cpu because
-                         * we cannot reprogram the target cpu hardware and
-                         * we would cause it to fire late.
-                         */
-                        if (delta.tv64 < 0) {
-                                cpu = smp_processor_id();
-                                spin_unlock(&new_base->cpu_base->lock);
-                                spin_lock(&base->cpu_base->lock);
-                                timer->base = base;
-                                goto again;
-                        }
+                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                        cpu = this_cpu;
+                        spin_unlock(&new_base->cpu_base->lock);
+                        spin_lock(&base->cpu_base->lock);
+                        timer->base = base;
+                        goto again;
                 }
                 timer->base = new_base;
         }
@@ -1276,14 +1282,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
         expires_next.tv64 = KTIME_MAX;
 
+        spin_lock(&cpu_base->lock);
+        /*
+         * We set expires_next to KTIME_MAX here with cpu_base->lock
+         * held to prevent that a timer is enqueued in our queue via
+         * the migration code. This does not affect enqueueing of
+         * timers which run their callback and need to be requeued on
+         * this CPU.
+         */
+        cpu_base->expires_next.tv64 = KTIME_MAX;
+
         base = cpu_base->clock_base;
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                 ktime_t basenow;
                 struct rb_node *node;
 
-                spin_lock(&cpu_base->lock);
-
                 basenow = ktime_add(now, base->offset);
 
                 while ((node = base->first)) {
@@ -1316,11 +1330,15 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
                         __run_hrtimer(timer);
                 }
-                spin_unlock(&cpu_base->lock);
                 base++;
         }
 
+        /*
+         * Store the new expiry value so the migration code can verify
+         * against it.
+         */
         cpu_base->expires_next = expires_next;
+        spin_unlock(&cpu_base->lock);
 
         /* Reprogramming necessary ? */
         if (expires_next.tv64 != KTIME_MAX) {
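Taken together, the two new helpers replace the open-coded NOHZ/HIGHRES logic in switch_hrtimer_base() with a single decision. A minimal sketch of the resulting flow (simplified fragment; locking, the retry loop and the hrtimer_callback_running() check are elided):

        /* Sketch only: simplified target selection after this change. */
        int this_cpu = smp_processor_id();
        int cpu = hrtimer_get_target(this_cpu, pinned); /* NOHZ: prefer the nohz balancer CPU */

        if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
                /* The timer would expire before the target CPU's next
                 * programmed event and we cannot reprogram that CPU,
                 * so fall back to the local CPU. */
                cpu = this_cpu;
                new_base = base;
        }
        timer->base = new_base;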
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c0fa54b276d9..16b5739c516a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -237,13 +237,9 @@ static int __kprobes collect_garbage_slots(void)
 {
         struct kprobe_insn_page *kip;
         struct hlist_node *pos, *next;
-        int safety;
 
         /* Ensure no-one is preepmted on the garbages */
-        mutex_unlock(&kprobe_insn_mutex);
-        safety = check_safety();
-        mutex_lock(&kprobe_insn_mutex);
-        if (safety != 0)
+        if (check_safety())
                 return -EAGAIN;
 
         hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb753b8c..7bc888dfd06a 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2665,6 +2665,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
                 header.size += sizeof(cpu_entry);
 
                 cpu_entry.cpu = raw_smp_processor_id();
+                cpu_entry.reserved = 0;
         }
 
         if (sample_type & PERF_SAMPLE_PERIOD)
diff --git a/kernel/pid.c b/kernel/pid.c
index 5fa1db48d8b7..31310b5d3f50 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -36,7 +36,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
-#include <linux/kmemleak.h>
 
 #define pid_hashfn(nr, ns)      \
         hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -513,12 +512,6 @@ void __init pidhash_init(void)
         pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
         if (!pid_hash)
                 panic("Could not alloc pidhash!\n");
-        /*
-         * pid_hash contains references to allocated struct pid objects and it
-         * must be scanned by kmemleak to avoid false positives.
-         */
-        kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
-                       GFP_KERNEL);
         for (i = 0; i < pidhash_size; i++)
                 INIT_HLIST_HEAD(&pid_hash[i]);
 }
diff --git a/kernel/power/user.c b/kernel/power/user.c
index ed97375daae9..bf0014d6a5f0 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -23,7 +23,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <linux/smp_lock.h>
 #include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0dccfbba6d26..7717b95c2027 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1533,7 +1533,7 @@ void __init __rcu_init(void)
         int j;
         struct rcu_node *rnp;
 
-        printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
+        printk(KERN_INFO "Hierarchical RCU implementation.\n");
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
         printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
@@ -1546,7 +1546,6 @@ void __init __rcu_init(void)
                 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
         /* Register notifier for non-boot CPUs */
         register_cpu_notifier(&rcu_nb);
-        printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
 }
 
 module_param(blimit, int, 0);
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c9098d186e6..98972d366fdc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,6 +493,7 @@ struct rt_rq {
 #endif
 #ifdef CONFIG_SMP
         unsigned long rt_nr_migratory;
+        unsigned long rt_nr_total;
         int overloaded;
         struct plist_head pushable_tasks;
 #endif
@@ -2571,15 +2572,37 @@ static void __sched_fork(struct task_struct *p)
         p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
         p->se.wait_start = 0;
-        p->se.sum_sleep_runtime = 0;
-        p->se.sleep_start = 0;
-        p->se.block_start = 0;
-        p->se.sleep_max = 0;
-        p->se.block_max = 0;
-        p->se.exec_max = 0;
-        p->se.slice_max = 0;
-        p->se.wait_max = 0;
+        p->se.wait_max = 0;
+        p->se.wait_count = 0;
+        p->se.wait_sum = 0;
+
+        p->se.sleep_start = 0;
+        p->se.sleep_max = 0;
+        p->se.sum_sleep_runtime = 0;
+
+        p->se.block_start = 0;
+        p->se.block_max = 0;
+        p->se.exec_max = 0;
+        p->se.slice_max = 0;
+
+        p->se.nr_migrations_cold = 0;
+        p->se.nr_failed_migrations_affine = 0;
+        p->se.nr_failed_migrations_running = 0;
+        p->se.nr_failed_migrations_hot = 0;
+        p->se.nr_forced_migrations = 0;
+        p->se.nr_forced2_migrations = 0;
+
+        p->se.nr_wakeups = 0;
+        p->se.nr_wakeups_sync = 0;
+        p->se.nr_wakeups_migrate = 0;
+        p->se.nr_wakeups_local = 0;
+        p->se.nr_wakeups_remote = 0;
+        p->se.nr_wakeups_affine = 0;
+        p->se.nr_wakeups_affine_attempts = 0;
+        p->se.nr_wakeups_passive = 0;
+        p->se.nr_wakeups_idle = 0;
+
 #endif
 
         INIT_LIST_HEAD(&p->rt.run_list);
@@ -6541,6 +6564,11 @@ SYSCALL_DEFINE0(sched_yield)
         return 0;
 }
 
+static inline int should_resched(void)
+{
+        return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
+}
+
 static void __cond_resched(void)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -6560,8 +6588,7 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-        if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
-                        system_state == SYSTEM_RUNNING) {
+        if (should_resched()) {
                 __cond_resched();
                 return 1;
         }
@@ -6579,12 +6606,12 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
-        int resched = need_resched() && system_state == SYSTEM_RUNNING;
+        int resched = should_resched();
         int ret = 0;
 
         if (spin_needbreak(lock) || resched) {
                 spin_unlock(lock);
-                if (resched && need_resched())
+                if (resched)
                         __cond_resched();
                 else
                         cpu_relax();
@@ -6599,7 +6626,7 @@ int __sched cond_resched_softirq(void)
 {
         BUG_ON(!in_softirq());
 
-        if (need_resched() && system_state == SYSTEM_RUNNING) {
+        if (should_resched()) {
                 local_bh_enable();
                 __cond_resched();
                 local_bh_disable();
@@ -9070,7 +9097,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
         rt_rq->rt_nr_migratory = 0;
         rt_rq->overloaded = 0;
-        plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
+        plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
         rt_rq->rt_time = 0;
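The three cond_resched() variants above now share one predicate instead of each repeating the need_resched()/PREEMPT_ACTIVE/system_state test. A minimal sketch of the common pattern after this change (simplified; the CONFIG_DEBUG_SPINLOCK_SLEEP check inside __cond_resched() is elided):

        static inline int should_resched(void)
        {
                /* Reschedule only when asked to and not already preempting. */
                return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
        }

        int __sched _cond_resched(void)
        {
                if (should_resched()) {
                        __cond_resched();       /* schedules with PREEMPT_ACTIVE set */
                        return 1;
                }
                return 0;
        }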
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ba7fd6e9556f..7c248dc30f41 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -687,7 +687,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                          * all of which have the same weight.
                          */
                         if (sched_feat(NORMALIZED_SLEEPER) &&
-                                        task_of(se)->policy != SCHED_IDLE)
+                                        (!entity_is_task(se) ||
+                                         task_of(se)->policy != SCHED_IDLE))
                                 thresh = calc_delta_fair(thresh, se);
 
                         vruntime -= thresh;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9bf0d2a73045..3918e01994e0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 
 #ifdef CONFIG_RT_GROUP_SCHED
 
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
         return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 
 #else /* CONFIG_RT_GROUP_SCHED */
 
+#define rt_entity_is_task(rt_se) (1)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
         return container_of(rt_rq, struct rq, rt);
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 static void update_rt_migration(struct rt_rq *rt_rq)
 {
-        if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                 if (!rt_rq->overloaded) {
                         rt_set_overload(rq_of_rt_rq(rt_rq));
                         rt_rq->overloaded = 1;
@@ -86,6 +90,12 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+        if (!rt_entity_is_task(rt_se))
+                return;
+
+        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+        rt_rq->rt_nr_total++;
         if (rt_se->nr_cpus_allowed > 1)
                 rt_rq->rt_nr_migratory++;
 
@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+        if (!rt_entity_is_task(rt_se))
+                return;
+
+        rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+        rt_rq->rt_nr_total--;
         if (rt_se->nr_cpus_allowed > 1)
                 rt_rq->rt_nr_migratory--;
 
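With RT group scheduling, rt_nr_running on the root rt_rq can diverge from the number of runnable RT tasks on the CPU, so the overload test above switches to a dedicated per-CPU task count. A minimal sketch of the resulting bookkeeping (fragment, condensed from the hunks above):

        /* Sketch: accounting in inc_rt_migration() after this change. */
        if (!rt_entity_is_task(rt_se))          /* ignore group entities */
                return;

        rt_rq = &rq_of_rt_rq(rt_rq)->rt;        /* root rt_rq of the owning CPU */

        rt_rq->rt_nr_total++;                   /* every RT task on this CPU */
        if (rt_se->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;       /* tasks that could run elsewhere */

        update_rt_migration(rt_rq);             /* overloaded iff total > 1 && migratory */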
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 1ad6dd461119..a6dcd67b041d 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -254,15 +254,4 @@ void clockevents_notify(unsigned long reason, void *arg)
         spin_unlock(&clockevents_lock);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
-
-ktime_t clockevents_get_next_event(int cpu)
-{
-        struct tick_device *td;
-        struct clock_event_device *dev;
-
-        td = &per_cpu(tick_cpu_device, cpu);
-        dev = td->evtdev;
-
-        return dev->next_event;
-}
 #endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1551f47e7669..019f380fd764 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -226,13 +226,13 @@ config BOOT_TRACER
           the timings of the initcalls and traces key events and the identity
           of tasks that can cause boot delays, such as context-switches.
 
-          Its aim is to be parsed by the /scripts/bootgraph.pl tool to
+          Its aim is to be parsed by the scripts/bootgraph.pl tool to
           produce pretty graphics about boot inefficiencies, giving a visual
           representation of the delays during initcalls - but the raw
           /debug/tracing/trace text output is readable too.
 
-          You must pass in ftrace=initcall to the kernel command line
-          to enable this on bootup.
+          You must pass in initcall_debug and ftrace=initcall to the kernel
+          command line to enable this on bootup.
 
 config TRACE_BRANCH_PROFILING
         bool
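For example, with BOOT_TRACER enabled, both parameters are passed together on the boot command line:

        initcall_debug ftrace=initcall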
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 39af8af6fc30..1090b0aed9ba 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
+#include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3716bf04df6..4521c77d1a1a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -768,7 +768,7 @@ static struct tracer_stat function_stats __initdata = {
         .stat_show = function_stat_show
 };
 
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
         struct ftrace_profile_stat *stat;
         struct dentry *entry;
@@ -786,7 +786,6 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
                  * The files created are permanent, if something happens
                  * we still do not free memory.
                  */
-                kfree(stat);
                 WARN(1,
                      "Could not allocate stat file for cpu %d\n",
                      cpu);
@@ -813,7 +812,7 @@ static void ftrace_profile_debugfs(struct dentry *d_tracer)
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
-static void ftrace_profile_debugfs(struct dentry *d_tracer)
+static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 {
 }
 #endif /* CONFIG_FUNCTION_PROFILER */
@@ -3160,10 +3159,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
         ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
-        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                 goto out;
 
-        last_ftrace_enabled = ftrace_enabled;
+        last_ftrace_enabled = !!ftrace_enabled;
 
         if (ftrace_enabled) {
 
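Normalizing with !! matters because proc_dointvec accepts any integer, while last_ftrace_enabled is tracked as a strict 0/1 value (trace_stack.c below gets the same treatment). A small self-contained sketch of the comparison (hypothetical user-space demo, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                int last_enabled = 1;   /* tracing already on, tracked as 0 or 1 */
                int sysctl_value = 2;   /* user echoed "2" into the sysctl       */

                /* Raw compare: 1 == 2 is false, so the handler would run the
                 * enable path again even though nothing really changed. */
                printf("raw:        %d\n", last_enabled == sysctl_value);

                /* Normalized: 1 == !!2 is true, so the handler bails out early. */
                printf("normalized: %d\n", last_enabled == !!sysctl_value);
                return 0;
        }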
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3aa0a0dfdfa8..8bc8d8afea6a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,6 +17,7 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 5e32e375134d..6db005e12487 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -26,6 +26,9 @@ TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
         ftrace_graph_ret_entry, ignore,
         TRACE_STRUCT(
                 TRACE_FIELD(unsigned long, ret.func, func)
+                TRACE_FIELD(unsigned long long, ret.calltime, calltime)
+                TRACE_FIELD(unsigned long long, ret.rettime, rettime)
+                TRACE_FIELD(unsigned long, ret.overrun, overrun)
                 TRACE_FIELD(int, ret.depth, depth)
         ),
         TP_RAW_FMT("<-- %lx (%d)")
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 7402144bff21..75ef000613c3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -363,7 +363,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
  out_reg:
         ret = register_ftrace_function_probe(glob, ops, count);
 
-        return ret;
+        return ret < 0 ? ret : 0;
 }
 
 static struct ftrace_func_command ftrace_traceon_cmd = {
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 7938f3ae93e3..e0c2545622e8 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -27,8 +27,7 @@ void trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
 
-        s->buffer[len] = 0;
-        seq_puts(m, s->buffer);
+        seq_write(m, s->buffer, len);
 
         trace_seq_init(s);
 }
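seq_write() takes an explicit length, so the buffer no longer needs NUL termination; the old code forced a terminator into s->buffer[len] to make seq_puts() safe, which clobbered the last stored byte whenever the trace_seq was full. A sketch of the two calling conventions (assuming the usual seq_file helpers):

        /* Old: seq_puts() needs a C string, so a terminator was forced in. */
        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        /* New: copies exactly len bytes, no terminator required. */
        seq_write(m, s->buffer, len);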
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2d7aebd71dbd..e644af910124 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -326,10 +326,10 @@ stack_trace_sysctl(struct ctl_table *table, int write,
         ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
         if (ret || !write ||
-            (last_stack_tracer_enabled == stack_tracer_enabled))
+            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                 goto out;
 
-        last_stack_tracer_enabled = stack_tracer_enabled;
+        last_stack_tracer_enabled = !!stack_tracer_enabled;
 
         if (stack_tracer_enabled)
                 register_ftrace_function(&trace_ops);
