Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex.c                    17
-rw-r--r--  kernel/irq/manage.c                3
-rw-r--r--  kernel/kexec.c                     7
-rw-r--r--  kernel/sched.c                   133
-rw-r--r--  kernel/sched_fair.c                2
-rw-r--r--  kernel/time/tick-sched.c           5
-rw-r--r--  kernel/trace/trace_event_perf.c    4
7 files changed, 91 insertions, 80 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index e7a35f1039e7..6a3a5fa1526d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -429,20 +429,11 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
 	struct task_struct *p;
-	const struct cred *cred = current_cred(), *pcred;
 
 	rcu_read_lock();
 	p = find_task_by_vpid(pid);
-	if (!p) {
-		p = ERR_PTR(-ESRCH);
-	} else {
-		pcred = __task_cred(p);
-		if (cred->euid != pcred->euid &&
-		    cred->euid != pcred->uid)
-			p = ERR_PTR(-ESRCH);
-		else
-			get_task_struct(p);
-	}
+	if (p)
+		get_task_struct(p);
 
 	rcu_read_unlock();
 
@@ -564,8 +555,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	if (!pid)
 		return -ESRCH;
 	p = futex_find_get_task(pid);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	if (!p)
+		return -ESRCH;
 
 	/*
 	 * We need to look at the task state flags to figure out,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3164ba7ce151..e1497481fe8a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -456,6 +456,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;
+
+		if (chip != desc->chip)
+			irq_chip_set_defaults(desc->chip);
 	}
 
 	return ret;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 474a84715eac..131b1703936f 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1089,9 +1089,10 @@ void crash_kexec(struct pt_regs *regs)
 
 size_t crash_get_memory_size(void)
 {
-	size_t size;
+	size_t size = 0;
 	mutex_lock(&kexec_mutex);
-	size = crashk_res.end - crashk_res.start + 1;
+	if (crashk_res.end != crashk_res.start)
+		size = crashk_res.end - crashk_res.start + 1;
 	mutex_unlock(&kexec_mutex);
 	return size;
 }
@@ -1134,7 +1135,7 @@ int crash_shrink_memory(unsigned long new_size)
 
 	free_reserved_phys_range(end, crashk_res.end);
 
-	if (start == end)
+	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
 	crashk_res.end = end - 1;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index f8b8996228dd..cb816e36cc8b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -306,52 +306,6 @@ static int init_task_group_load = INIT_TASK_GROUP_LOAD;
  */
 struct task_group init_task_group;
 
-/* return group to which a task belongs */
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	struct task_group *tg;
-
-#ifdef CONFIG_CGROUP_SCHED
-	tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
-				struct task_group, css);
-#else
-	tg = &init_task_group;
-#endif
-	return tg;
-}
-
-/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
-{
-	/*
-	 * Strictly speaking this rcu_read_lock() is not needed since the
-	 * task_group is tied to the cgroup, which in turn can never go away
-	 * as long as there are tasks attached to it.
-	 *
-	 * However since task_group() uses task_subsys_state() which is an
-	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
-	 */
-	rcu_read_lock();
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
-	p->se.parent = task_group(p)->se[cpu];
-#endif
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
-	p->rt.parent = task_group(p)->rt_se[cpu];
-#endif
-	rcu_read_unlock();
-}
-
-#else
-
-static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
-static inline struct task_group *task_group(struct task_struct *p)
-{
-	return NULL;
-}
-
 #endif /* CONFIG_CGROUP_SCHED */
 
 /* CFS-related fields in a runqueue */
@@ -644,6 +598,49 @@ static inline int cpu_of(struct rq *rq)
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		(&__raw_get_cpu_var(runqueues))
 
+#ifdef CONFIG_CGROUP_SCHED
+
+/*
+ * Return the group to which this tasks belongs.
+ *
+ * We use task_subsys_state_check() and extend the RCU verification
+ * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
+ * holds that lock for each task it moves into the cgroup. Therefore
+ * by holding that lock, we pin the task to the current cgroup.
+ */
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	struct cgroup_subsys_state *css;
+
+	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+			lockdep_is_held(&task_rq(p)->lock));
+	return container_of(css, struct task_group, css);
+}
+
+/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+	p->se.parent = task_group(p)->se[cpu];
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
+	p->rt.parent = task_group(p)->rt_se[cpu];
+#endif
+}
+
+#else /* CONFIG_CGROUP_SCHED */
+
+static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+#endif /* CONFIG_CGROUP_SCHED */
+
 inline void update_rq_clock(struct rq *rq)
 {
 	if (!rq->skip_clock_update)
@@ -1257,6 +1254,12 @@ static void sched_avg_update(struct rq *rq)
 	s64 period = sched_avg_period();
 
 	while ((s64)(rq->clock - rq->age_stamp) > period) {
+		/*
+		 * Inline assembly required to prevent the compiler
+		 * optimising this loop into a divmod call.
+		 * See __iter_div_u64_rem() for another example of this.
+		 */
+		asm("" : "+rm" (rq->age_stamp));
 		rq->age_stamp += period;
 		rq->rt_avg /= 2;
 	}
@@ -1660,9 +1663,6 @@ static void update_shares(struct sched_domain *sd)
 
 static void update_h_load(long cpu)
 {
-	if (root_task_group_empty())
-		return;
-
 	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
@@ -2494,7 +2494,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
+	/*
+	 * The child is not yet in the pid-hash so no cgroup attach races,
+	 * and the cgroup is pinned to this child due to cgroup_fork()
+	 * is ran before sched_fork().
+	 *
+	 * Silence PROVE_RCU.
+	 */
+	rcu_read_lock();
 	set_task_cpu(p, cpu);
+	rcu_read_unlock();
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -4465,16 +4474,6 @@ recheck:
 	}
 
 	if (user) {
-#ifdef CONFIG_RT_GROUP_SCHED
-		/*
-		 * Do not allow realtime tasks into groups that have no runtime
-		 * assigned.
-		 */
-		if (rt_bandwidth_enabled() && rt_policy(policy) &&
-				task_group(p)->rt_bandwidth.rt_runtime == 0)
-			return -EPERM;
-#endif
-
 		retval = security_task_setscheduler(p, policy, param);
 		if (retval)
 			return retval;
@@ -4490,6 +4489,22 @@ recheck:
 	 * runqueue lock must be held.
 	 */
 	rq = __task_rq_lock(p);
+
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (user) {
+		/*
+		 * Do not allow realtime tasks into groups that have no runtime
+		 * assigned.
+		 */
+		if (rt_bandwidth_enabled() && rt_policy(policy) &&
+				task_group(p)->rt_bandwidth.rt_runtime == 0) {
+			__task_rq_unlock(rq);
+			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+			return -EPERM;
+		}
+	}
+#endif
+
 	/* recheck policy now with rq lock held */
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index eed35eded602..a878b5332daa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1d7b9bc1c034..783fbadf2202 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -315,9 +315,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 		goto end;
 	}
 
-	if (nohz_ratelimit(cpu))
-		goto end;
-
 	ts->idle_calls++;
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
@@ -328,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	} while (read_seqretry(&xtime_lock, seq));
 
 	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	    arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index e6f65887842c..8a2b73f7c068 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -96,7 +96,9 @@ int perf_trace_init(struct perf_event *p_event)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tp_event, &ftrace_events, list) {
 		if (tp_event->event.type == event_id &&
-		    tp_event->class && tp_event->class->perf_probe &&
+		    tp_event->class &&
+		    (tp_event->class->perf_probe ||
+		     tp_event->class->reg) &&
 		    try_module_get(tp_event->mod)) {
 			ret = perf_trace_event_init(tp_event, p_event);
 			break;