Diffstat (limited to 'kernel')
-rw-r--r--	kernel/freezer.c	 7
-rw-r--r--	kernel/irq/internals.h	 3
-rw-r--r--	kernel/irq/manage.c	50
-rw-r--r--	kernel/irq/migration.c	 2
-rw-r--r--	kernel/perf_counter.c	 1
-rw-r--r--	kernel/sched.c	 4
-rw-r--r--	kernel/sched_fair.c	10
7 files changed, 64 insertions(+), 13 deletions(-)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2f4936cf7083..bd1d42b17cb2 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,12 +44,19 @@ void refrigerator(void)
 	recalc_sigpending(); /* We sent fake signal, clean it up */
 	spin_unlock_irq(&current->sighand->siglock);
 
+	/* prevent accounting of that task to load */
+	current->flags |= PF_FREEZING;
+
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (!frozen(current))
 			break;
 		schedule();
 	}
+
+	/* Remove the accounting blocker */
+	current->flags &= ~PF_FREEZING;
+
 	pr_debug("%s left refrigerator\n", current->comm);
 	__set_current_state(save);
 }
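
The refrigerator loop sleeps in TASK_UNINTERRUPTIBLE, and uninterruptible sleepers are exactly what the load average counts, so a suspended machine full of frozen tasks would inflate loadavg for no reason. PF_FREEZING brackets the loop so the accounting can tell "asleep because frozen" apart from "asleep waiting for I/O". A minimal sketch of the test the flag enables, assuming a hypothetical task_counts_toward_load() helper (the real check lives in the scheduler headers, outside this kernel/-only diffstat):

/*
 * Hedged sketch, not the kernel's actual helper: how load accounting
 * can use PF_FREEZING to ignore tasks that only sleep because the
 * freezer put them to sleep.
 */
static inline int task_counts_toward_load(struct task_struct *task)
{
	/* uninterruptible sleepers normally contribute to loadavg... */
	return (task->state & TASK_UNINTERRUPTIBLE) != 0 &&
	       /* ...unless they are merely parked in the refrigerator */
	       (task->flags & PF_FREEZING) == 0;
}
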
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 73468253143b..e70ed5592eb9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
-extern void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+extern void irq_set_thread_affinity(struct irq_desc *desc);
 
 /*
  * Debugging printout:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da67672901..f0de36f13a44 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ * irq_set_thread_affinity - Notify irq threads to adjust affinity
+ * @desc:	irq descriptor which has affinity changed
+ *
+ * We just set IRQTF_AFFINITY and delegate the affinity setting
+ * to the interrupt thread itself. We can not call
+ * set_cpus_allowed_ptr() here as we hold desc->lock and this
+ * code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
 	struct irqaction *action = desc->action;
 
 	while (action) {
 		if (action->thread)
-			set_cpus_allowed_ptr(action->thread, cpumask);
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		action = action->next;
 	}
 }
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	if (desc->status & IRQ_MOVE_PCNTXT) {
 		if (!desc->chip->set_affinity(irq, cpumask)) {
 			cpumask_copy(desc->affinity, cpumask);
-			irq_set_thread_affinity(desc, cpumask);
+			irq_set_thread_affinity(desc);
 		}
 	}
 	else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 #else
 	if (!desc->chip->set_affinity(irq, cpumask)) {
 		cpumask_copy(desc->affinity, cpumask);
-		irq_set_thread_affinity(desc, cpumask);
+		irq_set_thread_affinity(desc);
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
-		irq_set_thread_affinity(desc, desc->affinity);
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -444,6 +452,34 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 }
 
 /*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
@@ -458,6 +494,8 @@ static int irq_thread(void *data)
 
 	while (!irq_wait_for_interrupt(action)) {
 
+		irq_thread_check_affinity(desc, action);
+
 		atomic_inc(&desc->threads_active);
 
 		spin_lock_irq(&desc->lock);
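
The kerneldoc above spells out the constraint: irq_set_thread_affinity() runs with desc->lock held, possibly in hard interrupt context, where the sleep-capable set_cpus_allowed_ptr() is off limits. The fix is a classic flag-and-defer: the restricted context only sets IRQTF_AFFINITY, and the irq thread applies the new mask to itself at the top of its loop, where it may allocate and sleep. A self-contained C11 sketch of the same pattern, with illustrative names (worker, WORK_FLAG_AFFINITY, apply_affinity are not kernel APIs):

#include <stdatomic.h>

#define WORK_FLAG_AFFINITY (1u << 0)

struct worker {
	atomic_uint flags;	/* analogue of action->thread_flags */
};

/* Restricted context (analogue of hard irq with desc->lock held):
 * only record that the affinity needs refreshing. */
static void worker_request_affinity_update(struct worker *w)
{
	atomic_fetch_or(&w->flags, WORK_FLAG_AFFINITY);
}

/* Worker context (analogue of irq_thread_check_affinity()):
 * test-and-clear the flag, then do the sleep-capable work. */
static void worker_check_affinity(struct worker *w)
{
	unsigned int old = atomic_fetch_and(&w->flags, ~WORK_FLAG_AFFINITY);

	if (old & WORK_FLAG_AFFINITY) {
		/* safe here: may take locks, allocate, sleep,
		 * e.g. apply_affinity(w); */
	}
}

Note also the out-of-memory fallback in irq_thread_check_affinity(): if alloc_cpumask_var() fails, the thread simply re-sets IRQTF_AFFINITY and retries on the next wakeup instead of dropping the request.
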
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index cfe767ca1545..fcb6c96f2627 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -45,7 +45,7 @@ void move_masked_irq(int irq)
 		   < nr_cpu_ids))
 		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
 			cpumask_copy(desc->affinity, desc->pending_mask);
-			irq_set_thread_affinity(desc, desc->pending_mask);
+			irq_set_thread_affinity(desc);
 		}
 
 	cpumask_clear(desc->pending_mask);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb753b8c..7bc888dfd06a 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2665,6 +2665,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(cpu_entry);
 
 		cpu_entry.cpu = raw_smp_processor_id();
+		cpu_entry.reserved = 0;
 	}
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
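
The one-line perf_counter.c change closes an information leak: cpu_entry lives on the kernel stack and is copied verbatim into the user-visible ring buffer, so an unwritten reserved field would carry whatever stack bytes happened to be there. The rule it enforces: every byte of a structure handed to user space, padding and reserved fields included, must be initialized. A kernel-style sketch with an illustrative struct (field layout stands in for perf's actual sample record):

/* Illustrative only, not perf's real record layout. */
struct sample_cpu {
	u32 cpu;
	u32 reserved;	/* left unwritten, this would leak stack bytes */
};

struct sample_cpu cpu_entry;

cpu_entry.cpu      = raw_smp_processor_id();
cpu_entry.reserved = 0;	/* define every byte before it reaches user space */
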
diff --git a/kernel/sched.c b/kernel/sched.c
index 98972d366fdc..1b59e265273b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
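
The sched.c hunks fix load-average bookkeeping across CPU hotplug. calc_global_load_remove() folds a dead CPU's calc_load_active into the global calc_load_tasks, so clearing the per-rq counter right after the fold keeps a later fold from subtracting the same tasks twice; moving the calc_load_update initialization from CPU_ONLINE up to the CPU_UP_PREPARE path presumably ensures the runqueue never runs with a stale sampling window in between. A hedged sketch of the double-subtraction hazard the first hunk closes:

/* Illustrative only: folding without clearing subtracts twice. */
static void fold_load(struct rq *rq)
{
	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
	rq->calc_load_active = 0;	/* without this, a second fold of this
					 * rq would remove the same tasks again */
}
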
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc30f41..9ffb2b2ceba4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 	return min_vruntime;
 }
 
+static inline int entity_before(struct sched_entity *a,
+				struct sched_entity *b)
+{
+	return (s64)(a->vruntime - b->vruntime) < 0;
+}
+
 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	return se->vruntime - cfs_rq->min_vruntime;
@@ -1017,7 +1023,7 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Already in the rightmost position?
 	 */
-	if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
+	if (unlikely(!rightmost || entity_before(rightmost, se)))
 		return;
 
 	/*
@@ -1713,7 +1719,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 
 	/* 'curr' will be NULL if the child belongs to a different group */
 	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && curr->vruntime < se->vruntime) {
+			curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
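
entity_before() replaces direct < comparisons of vruntime with a signed-difference test. vruntime is an unsigned 64-bit counter that can wrap, and across a wrap a direct comparison orders the values backwards; (s64)(a - b) < 0 stays correct as long as the two values are within 2^63 of each other. A self-contained userspace demonstration of the idiom (uint64_t standing in for u64):

#include <stdio.h>
#include <stdint.h>

/* Same idiom as entity_before(): wrap-safe ordering of a monotonic
 * unsigned counter via signed subtraction. */
static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t old = UINT64_MAX - 10;	/* just before the counter wraps */
	uint64_t new = 5;		/* same counter, shortly after wrapping */

	printf("direct <  : %d\n", old < new);		/* 0: wrongly says old is not before new */
	printf("wrap-safe : %d\n", before(old, new));	/* 1: correct ordering across the wrap */
	return 0;
}
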