Diffstat (limited to 'kernel')
 kernel/printk.c     |  4 ++--
 kernel/sched.c      | 10 +++++-----
 kernel/sched_fair.c |  8 ++++++--
 3 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index 58bbec684119..29ae1e99cde0 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -455,10 +455,10 @@ static int __init ignore_loglevel_setup(char *str)
 	ignore_loglevel = 1;
 	printk(KERN_INFO "debug: ignoring loglevel setting.\n");
 
-	return 1;
+	return 0;
 }
 
-__setup("ignore_loglevel", ignore_loglevel_setup);
+early_param("ignore_loglevel", ignore_loglevel_setup);
 
 /*
  * Write out chars from start to end - 1 inclusive
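
The return-value change follows the registration macro: a __setup() handler returns 1 to mark the option as consumed, while an early_param() handler returns 0 on success (non-zero is treated as an error), and it runs earlier in boot. A minimal sketch of an early parameter in the same style; the my_debug name and flag are hypothetical, not part of this patch:

static int my_debug __initdata;

/* Parsed during early boot, before the normal __setup() handlers run. */
static int __init my_debug_setup(char *str)
{
	my_debug = 1;
	return 0;	/* early_param convention: 0 means handled */
}
early_param("my_debug", my_debug_setup);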
diff --git a/kernel/sched.c b/kernel/sched.c
index ba4c88088f62..8355e007e021 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
 }
@@ -1354,7 +1354,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }
 
 /*
@@ -1366,7 +1366,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }
 
 /**
@@ -2006,7 +2006,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
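
Both helpers ignored their task_struct argument, so the patch drops it and updates every caller. A standalone sketch of the resulting accounting pattern, using a stand-in struct rq (illustrative plain C, not kernel code):

#include <stdio.h>

/* Stand-in for the scheduler's per-CPU runqueue. */
struct rq {
	unsigned long nr_running;
};

static void inc_nr_running(struct rq *rq) { rq->nr_running++; }
static void dec_nr_running(struct rq *rq) { rq->nr_running--; }

int main(void)
{
	struct rq rq = { 0 };

	inc_nr_running(&rq);	/* activate_task() path */
	inc_nr_running(&rq);	/* wake_up_new_task() path */
	dec_nr_running(&rq);	/* deactivate_task() path */
	printf("nr_running = %lu\n", rq.nr_running);	/* prints 1 */
	return 0;
}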
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 72e25c7a3a18..6c091d6e159d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -520,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+		if (sched_feat(NEW_FAIR_SLEEPERS))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
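
Removing the entity_is_task() test means that, with NEW_FAIR_SLEEPERS enabled, group scheduling entities now receive the same sleeper credit as plain tasks. A rough sketch of the placement arithmetic this hunk sits in, with stand-in parameters (the kernel operates on a struct sched_entity; the names here are illustrative):

/*
 * Place a waking entity: credit it at most one latency period,
 * but never move it backwards past its old vruntime.
 */
static unsigned long long
place_sleeper(unsigned long long cfs_min_vruntime,
	      unsigned long long se_vruntime,
	      unsigned long long sched_latency)
{
	unsigned long long vruntime = cfs_min_vruntime;

	/* sleeps up to a single latency don't count */
	vruntime -= sched_latency;

	/* ensure we never gain time by being placed backwards */
	if (vruntime < se_vruntime)
		vruntime = se_vruntime;

	return vruntime;
}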
@@ -1106,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	}
 
 	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
 		gran = calc_delta_fair(gran, &se->load);
 
 	if (pse->vruntime + gran < se->vruntime)
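
Changing the test from != to > means the wakeup granularity is now only scaled down when the current task is heavier than nice 0 (negative nice); lighter, positive-nice tasks no longer get it scaled up, which previously made them harder to preempt. A rough sketch approximating the scaling that calc_delta_fair() performs; the weights come from the kernel's nice-to-weight table, and the 10 ms granularity is an assumed default:

#define NICE_0_LOAD	1024UL

/* gran scaled by NICE_0_LOAD / weight, for heavy tasks only. */
static unsigned long scaled_gran(unsigned long gran, unsigned long weight)
{
	if (weight > NICE_0_LOAD)
		gran = gran * NICE_0_LOAD / weight;
	return gran;
}

/*
 * With gran = 10000000 ns (10 ms):
 *   nice -5, weight 3121: gran ~= 3.3 ms, so a -5 task is easier to preempt
 *   nice +5, weight  335: gran stays 10 ms (the old != test gave ~30 ms)
 */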