Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/Kconfig           |   3
-rw-r--r--  kernel/irq/handle.c          | 111
-rw-r--r--  kernel/lockdep.c             |  18
-rw-r--r--  kernel/params.c              |  65
-rw-r--r--  kernel/perf_event.c          |  46
-rw-r--r--  kernel/printk.c              | 100
-rw-r--r--  kernel/sched.c               |  26
-rw-r--r--  kernel/sched_autogroup.c     |  32
-rw-r--r--  kernel/sched_autogroup.h     |   4
-rw-r--r--  kernel/sched_debug.c         |  42
-rw-r--r--  kernel/sched_fair.c          | 113
-rw-r--r--  kernel/smp.c                 |  62
-rw-r--r--  kernel/sysctl.c              |   3
-rw-r--r--  kernel/time/tick-sched.c     |   7
-rw-r--r--  kernel/trace/trace_irqsoff.c |   8
-rw-r--r--  kernel/workqueue.c           |  20
16 files changed, 381 insertions(+), 279 deletions(-)
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 31d766bf5d2e..8e42fec7686d 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -9,9 +9,6 @@ menu "IRQ subsystem"
 config GENERIC_HARDIRQS
 	def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 # Select this to disable the deprecated stuff
 config GENERIC_HARDIRQS_NO_DEPRECATED
 	def_bool n
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e2347eb63306..3540a7190122 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -118,114 +118,3 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 
 	return retval;
 }
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-
-#ifdef CONFIG_ENABLE_WARN_DEPRECATED
-# warning __do_IRQ is deprecated. Please convert to proper flow handlers
-#endif
-
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq: the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
-	unsigned int status;
-
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	if (CHECK_IRQ_PER_CPU(desc->status)) {
-		irqreturn_t action_ret;
-
-		/*
-		 * No locking required for CPU-local interrupts:
-		 */
-		if (desc->irq_data.chip->ack)
-			desc->irq_data.chip->ack(irq);
-		if (likely(!(desc->status & IRQ_DISABLED))) {
-			action_ret = handle_IRQ_event(irq, desc->action);
-			if (!noirqdebug)
-				note_interrupt(irq, desc, action_ret);
-		}
-		desc->irq_data.chip->end(irq);
-		return 1;
-	}
-
-	raw_spin_lock(&desc->lock);
-	if (desc->irq_data.chip->ack)
-		desc->irq_data.chip->ack(irq);
-	/*
-	 * REPLAY is when Linux resends an IRQ that was dropped earlier
-	 * WAITING is used by probe to mark irqs that are being tested
-	 */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	 * Since we set PENDING, if another processor is handling
-	 * a different instance of this same irq, the other processor
-	 * will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		irqreturn_t action_ret;
-
-		raw_spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
-
-		raw_spin_lock(&desc->lock);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-	desc->status &= ~IRQ_INPROGRESS;
-
-out:
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	desc->irq_data.chip->end(irq);
-	raw_spin_unlock(&desc->lock);
-
-	return 1;
-}
-#endif
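
The removal above assumes every architecture has been converted from __do_IRQ() to per-IRQ flow handlers. A minimal conversion sketch follows; it is illustrative only, and the registration/callback names (set_irq_chip_and_handler(), the demo_* callbacks) follow the 2.6.3x irq_chip API rather than anything in this patch:

#include <linux/irq.h>

/* Illustrative chip callbacks; a real driver pokes its hardware here. */
static void demo_irq_ack(struct irq_data *d)    { /* ack at the chip */ }
static void demo_irq_mask(struct irq_data *d)   { /* mask at the chip */ }
static void demo_irq_unmask(struct irq_data *d) { /* unmask at the chip */ }

static struct irq_chip demo_chip = {
	.name		= "demo",
	.irq_ack	= demo_irq_ack,
	.irq_mask	= demo_irq_mask,
	.irq_unmask	= demo_irq_unmask,
};

static void __init demo_init_one_irq(unsigned int irq)
{
	/*
	 * handle_level_irq() (or handle_edge_irq()) supplies the locking
	 * and the IRQ_INPROGRESS/IRQ_PENDING bookkeeping that the removed
	 * __do_IRQ() open-coded above.
	 */
	set_irq_chip_and_handler(irq, &demo_chip, handle_level_irq);
}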
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 42ba65dff7d9..0d2058da80f5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2292,22 +2292,6 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 }
 
 /*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
-	early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
-	early_boot_irqs_enabled = 1;
-}
-
-/*
  * Hardirqs will be enabled:
  */
 void trace_hardirqs_on_caller(unsigned long ip)
@@ -2319,7 +2303,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
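
The lockdep-local early_boot_irqs_enabled flag gives way to a shared early_boot_irqs_disabled global that boot code maintains directly instead of calling into lockdep. A rough sketch of the boot-side usage this check relies on (assumed, not part of this hunk; the real definition lives outside this file):

#include <linux/kernel.h>

/* Assumed boot-side counterpart of the lockdep check above. */
bool early_boot_irqs_disabled __read_mostly;

void __init demo_early_boot(void)
{
	early_boot_irqs_disabled = true;
	/* ... early setup that must run with hard IRQs off ... */
	early_boot_irqs_disabled = false;
	local_irq_enable();	/* from here on, irqs-on events are legal */
}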
diff --git a/kernel/params.c b/kernel/params.c
index 08107d181758..0da1411222b9 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -719,9 +719,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
 			params[i].ops->free(params[i].arg);
 }
 
-static void __init kernel_add_sysfs_param(const char *name,
-					  struct kernel_param *kparam,
-					  unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
 {
 	struct module_kobject *mk;
 	struct kobject *kobj;
@@ -729,10 +727,7 @@ static void __init kernel_add_sysfs_param(const char *name,
 
 	kobj = kset_find_obj(module_kset, name);
 	if (kobj) {
-		/* We already have one. Remove params so we can add more. */
 		mk = to_module_kobject(kobj);
-		/* We need to remove it before adding parameters. */
-		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
 	} else {
 		mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
 		BUG_ON(!mk);
@@ -743,15 +738,36 @@ static void __init kernel_add_sysfs_param(const char *name,
 			   "%s", name);
 		if (err) {
 			kobject_put(&mk->kobj);
-			printk(KERN_ERR "Module '%s' failed add to sysfs, "
-			       "error number %d\n", name, err);
-			printk(KERN_ERR "The system will be unstable now.\n");
-			return;
+			printk(KERN_ERR
+				"Module '%s' failed add to sysfs, error number %d\n",
+				name, err);
+			printk(KERN_ERR
+				"The system will be unstable now.\n");
+			return NULL;
 		}
-		/* So that exit path is even. */
+
+		/* So that we hold reference in both cases. */
 		kobject_get(&mk->kobj);
 	}
 
+	return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+					  struct kernel_param *kparam,
+					  unsigned int name_skip)
+{
+	struct module_kobject *mk;
+	int err;
+
+	mk = locate_module_kobject(name);
+	if (!mk)
+		return;
+
+	/* We need to remove old parameters before adding more. */
+	if (mk->mp)
+		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
 	/* These should not fail at boot. */
 	err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
 	BUG_ON(err);
@@ -796,6 +812,32 @@ static void __init param_sysfs_builtin(void)
 	}
 }
 
+ssize_t __modver_version_show(struct module_attribute *mattr,
+			      struct module *mod, char *buf)
+{
+	struct module_version_attribute *vattr =
+		container_of(mattr, struct module_version_attribute, mattr);
+
+	return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+	const struct module_version_attribute *vattr;
+	struct module_kobject *mk;
+	int err;
+
+	for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+		mk = locate_module_kobject(vattr->module_name);
+		if (mk) {
+			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+			kobject_uevent(&mk->kobj, KOBJ_ADD);
+			kobject_put(&mk->kobj);
+		}
+	}
+}
 
 /* module-related sysfs stuff */
 
@@ -875,6 +917,7 @@ static int __init param_sysfs_init(void)
 	}
 	module_sysfs_initialized = 1;
 
+	version_sysfs_builtin();
 	param_sysfs_builtin();
 
 	return 0;
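
version_sysfs_builtin() walks a linker-generated array bounded by __start___modver/__stop___modver. A hedged sketch of how a MODULE_VERSION()-style macro could drop an entry into that section (the real macro lives in the module headers and may differ in detail; the DEMO_ name is made up):

/* Assumed shape of the per-module entry consumed above. */
#define DEMO_MODULE_VERSION(modname, ver)				\
	static struct module_version_attribute __modver_attr		\
	__used __attribute__((__section__("__modver"))) = {		\
		.mattr = {						\
			.attr = { .name = "version", .mode = S_IRUGO },	\
			.show = __modver_version_show,			\
		},							\
		.module_name = modname,					\
		.version     = ver,					\
	}

/* Usage: DEMO_MODULE_VERSION("demo_driver", "1.2"); after boot,
 * /sys/module/demo_driver/version would appear even when built in. */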
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 84522c796987..126a302c481c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2201,13 +2201,6 @@ find_lively_task_by_vpid(pid_t vpid)
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2268,14 +2261,27 @@ retry:
 
 		get_ctx(ctx);
 
-		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-			/*
-			 * We raced with some other task; use
-			 * the context they set.
-			 */
+		err = 0;
+		mutex_lock(&task->perf_event_mutex);
+		/*
+		 * If it has already passed perf_event_exit_task().
+		 * we must see PF_EXITING, it takes this mutex too.
+		 */
+		if (task->flags & PF_EXITING)
+			err = -ESRCH;
+		else if (task->perf_event_ctxp[ctxn])
+			err = -EAGAIN;
+		else
+			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+		mutex_unlock(&task->perf_event_mutex);
+
+		if (unlikely(err)) {
 			put_task_struct(task);
 			kfree(ctx);
-			goto retry;
+
+			if (err == -EAGAIN)
+				goto retry;
+			goto errout;
 		}
 	}
 
@@ -5374,6 +5380,8 @@ free_dev:
 	goto out;
 }
 
+static struct lock_class_key cpuctx_mutex;
+
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
 	int cpu, ret;
@@ -5422,6 +5430,7 @@ skip_type:
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		__perf_event_init_context(&cpuctx->ctx);
+		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
@@ -6127,7 +6136,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * scheduled, so we are now safe from rescheduling changing
 	 * our context.
 	 */
-	child_ctx = child->perf_event_ctxp[ctxn];
+	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
 	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
@@ -6440,11 +6449,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	unsigned long flags;
 	int ret = 0;
 
-	child->perf_event_ctxp[ctxn] = NULL;
-
-	mutex_init(&child->perf_event_mutex);
-	INIT_LIST_HEAD(&child->perf_event_list);
-
 	if (likely(!parent->perf_event_ctxp[ctxn]))
 		return 0;
 
@@ -6533,6 +6537,10 @@ int perf_event_init_task(struct task_struct *child)
 {
 	int ctxn, ret;
 
+	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+	mutex_init(&child->perf_event_mutex);
+	INIT_LIST_HEAD(&child->perf_event_list);
+
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
 		if (ret)
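
The lockless cmpxchg() publication is replaced by "check PF_EXITING and publish under the same mutex", so an installer either backs off or publishes the context before the exit path can tear it down. A standalone userspace analog of the pattern (plain C, illustrative names only, error values merely echo the kernel's -ESRCH/-EAGAIN split):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int exiting;	/* stands in for PF_EXITING */
static void *ctx;	/* stands in for perf_event_ctxp[ctxn] */

static int install_ctx(void *new_ctx)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (exiting)
		err = -1;	/* -ESRCH: task is going away, give up */
	else if (ctx)
		err = -2;	/* -EAGAIN: lost a race, caller retries */
	else
		ctx = new_ctx;	/* publish under the mutex */
	pthread_mutex_unlock(&lock);
	return err;
}

static void exit_task(void)
{
	pthread_mutex_lock(&lock);
	exiting = 1;		/* any later install_ctx() must see this */
	pthread_mutex_unlock(&lock);
	/* ... safe to tear down ctx: no new installs can slip in ... */
}

int main(void)
{
	int dummy;

	printf("install: %d\n", install_ctx(&dummy));		/* 0 */
	exit_task();
	printf("install after exit: %d\n", install_ctx(&dummy));/* -1 */
	return 0;
}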
diff --git a/kernel/printk.c b/kernel/printk.c
index 53d9a9ec88e6..2ddbdc73aade 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -97,7 +97,7 @@ static int console_locked, console_suspended;
 /*
  * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
  * It is also used in interesting ways to provide interlocking in
- * release_console_sem().
+ * console_unlock();.
  */
 static DEFINE_SPINLOCK(logbuf_lock);
 
@@ -501,7 +501,7 @@ static void _call_console_drivers(unsigned start,
 /*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
- * The console_sem must be held.
+ * The console_lock must be held.
  */
 static void call_console_drivers(unsigned start, unsigned end)
 {
@@ -604,11 +604,11 @@ static int have_callable_console(void)
 *
 * This is printk(). It can be called from any context. We want it to work.
 *
- * We try to grab the console_sem. If we succeed, it's easy - we log the output and
+ * We try to grab the console_lock. If we succeed, it's easy - we log the output and
 * call the console drivers. If we fail to get the semaphore we place the output
 * into the log buffer and return. The current holder of the console_sem will
- * notice the new output in release_console_sem() and will send it to the
- * consoles before releasing the semaphore.
+ * notice the new output in console_unlock(); and will send it to the
+ * consoles before releasing the lock.
 *
 * One effect of this deferred printing is that code which calls printk() and
 * then changes console_loglevel may break. This is because console_loglevel
@@ -659,19 +659,19 @@ static inline int can_use_console(unsigned int cpu)
 /*
  * Try to get console ownership to actually show the kernel
  * messages from a 'printk'. Return true (and with the
- * console_semaphore held, and 'console_locked' set) if it
+ * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  *
  * This gets called with the 'logbuf_lock' spinlock held and
  * interrupts disabled. It should return with 'lockbuf_lock'
  * released but interrupts still disabled.
  */
-static int acquire_console_semaphore_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu)
 	__releases(&logbuf_lock)
 {
 	int retval = 0;
 
-	if (!try_acquire_console_sem()) {
+	if (console_trylock()) {
 		retval = 1;
 
 		/*
@@ -827,12 +827,12 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * actual magic (print out buffers, wake up klogd,
 	 * etc).
 	 *
-	 * The acquire_console_semaphore_for_printk() function
+	 * The console_trylock_for_printk() function
 	 * will release 'logbuf_lock' regardless of whether it
 	 * actually gets the semaphore or not.
 	 */
-	if (acquire_console_semaphore_for_printk(this_cpu))
-		release_console_sem();
+	if (console_trylock_for_printk(this_cpu))
+		console_unlock();
 
 	lockdep_on();
 out_restore_irqs:
@@ -993,7 +993,7 @@ void suspend_console(void)
 	if (!console_suspend_enabled)
 		return;
 	printk("Suspending console(s) (use no_console_suspend to debug)\n");
-	acquire_console_sem();
+	console_lock();
 	console_suspended = 1;
 	up(&console_sem);
 }
@@ -1004,7 +1004,7 @@
 		return;
 	down(&console_sem);
 	console_suspended = 0;
-	release_console_sem();
+	console_unlock();
 }
 
 /**
@@ -1027,21 +1027,21 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
 	case CPU_DYING:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
-		acquire_console_sem();
-		release_console_sem();
+		console_lock();
+		console_unlock();
 	}
 	return NOTIFY_OK;
 }
 
 /**
- * acquire_console_sem - lock the console system for exclusive use.
+ * console_lock - lock the console system for exclusive use.
  *
- * Acquires a semaphore which guarantees that the caller has
+ * Acquires a lock which guarantees that the caller has
  * exclusive access to the console system and the console_drivers list.
  *
 * Can sleep, returns nothing.
 */
-void acquire_console_sem(void)
+void console_lock(void)
 {
 	BUG_ON(in_interrupt());
 	down(&console_sem);
@@ -1050,21 +1050,29 @@ void acquire_console_sem(void)
 	console_locked = 1;
 	console_may_schedule = 1;
 }
-EXPORT_SYMBOL(acquire_console_sem);
+EXPORT_SYMBOL(console_lock);
 
-int try_acquire_console_sem(void)
+/**
+ * console_trylock - try to lock the console system for exclusive use.
+ *
+ * Tried to acquire a lock which guarantees that the caller has
+ * exclusive access to the console system and the console_drivers list.
+ *
+ * returns 1 on success, and 0 on failure to acquire the lock.
+ */
+int console_trylock(void)
 {
 	if (down_trylock(&console_sem))
-		return -1;
+		return 0;
 	if (console_suspended) {
 		up(&console_sem);
-		return -1;
+		return 0;
 	}
 	console_locked = 1;
 	console_may_schedule = 0;
-	return 0;
+	return 1;
 }
-EXPORT_SYMBOL(try_acquire_console_sem);
+EXPORT_SYMBOL(console_trylock);
 
 int is_console_locked(void)
 {
@@ -1095,20 +1103,20 @@ void wake_up_klogd(void)
 }
 
 /**
- * release_console_sem - unlock the console system
+ * console_unlock - unlock the console system
 *
- * Releases the semaphore which the caller holds on the console system
+ * Releases the console_lock which the caller holds on the console system
 * and the console driver list.
 *
- * While the semaphore was held, console output may have been buffered
- * by printk(). If this is the case, release_console_sem() emits
- * the output prior to releasing the semaphore.
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
 *
 * If there is output waiting for klogd, we wake it up.
 *
- * release_console_sem() may be called from any context.
+ * console_unlock(); may be called from any context.
 */
-void release_console_sem(void)
+void console_unlock(void)
 {
 	unsigned long flags;
 	unsigned _con_start, _log_end;
@@ -1141,7 +1149,7 @@
 	if (wake_klogd)
 		wake_up_klogd();
 }
-EXPORT_SYMBOL(release_console_sem);
+EXPORT_SYMBOL(console_unlock);
 
 /**
  * console_conditional_schedule - yield the CPU if required
@@ -1150,7 +1158,7 @@ EXPORT_SYMBOL(release_console_sem);
 * if this CPU should yield the CPU to another task, do
 * so here.
 *
- * Must be called within acquire_console_sem().
+ * Must be called within console_lock();.
 */
 void __sched console_conditional_schedule(void)
 {
@@ -1171,14 +1179,14 @@ void console_unblank(void)
 		if (down_trylock(&console_sem) != 0)
 			return;
 	} else
-		acquire_console_sem();
+		console_lock();
 
 	console_locked = 1;
 	console_may_schedule = 0;
 	for_each_console(c)
 		if ((c->flags & CON_ENABLED) && c->unblank)
 			c->unblank();
-	release_console_sem();
+	console_unlock();
 }
 
 /*
@@ -1189,7 +1197,7 @@ struct tty_driver *console_device(int *index)
 	struct console *c;
 	struct tty_driver *driver = NULL;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(c) {
 		if (!c->device)
 			continue;
@@ -1197,7 +1205,7 @@
 		if (driver)
 			break;
 	}
-	release_console_sem();
+	console_unlock();
 	return driver;
 }
 
@@ -1208,17 +1216,17 @@
 */
 void console_stop(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags &= ~CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags |= CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_start);
 
@@ -1340,7 +1348,7 @@ void register_console(struct console *newcon)
 	 * Put this console in the list - keep the
 	 * preferred driver at the head of the list.
 	 */
-	acquire_console_sem();
+	console_lock();
 	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
 		newcon->next = console_drivers;
 		console_drivers = newcon;
@@ -1352,14 +1360,14 @@
 	}
 	if (newcon->flags & CON_PRINTBUFFER) {
 		/*
-		 * release_console_sem() will print out the buffered messages
+		 * console_unlock(); will print out the buffered messages
 		 * for us.
 		 */
 		spin_lock_irqsave(&logbuf_lock, flags);
 		con_start = log_start;
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 
 	/*
@@ -1396,7 +1404,7 @@ int unregister_console(struct console *console)
 	return braille_unregister_console(console);
 #endif
 
-	acquire_console_sem();
+	console_lock();
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
@@ -1418,7 +1426,7 @@
 	if (console_drivers != NULL && console->flags & CON_CONSDEV)
 		console_drivers->flags |= CON_CONSDEV;
 
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 	return res;
 }
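
The console_sem API is renamed wholesale; the only semantic change is that console_trylock() returns 1 on success and 0 on failure, where try_acquire_console_sem() returned 0 and -1. A caller-side sketch, assuming a kernel with this patch applied (demo_* names are illustrative):

#include <linux/console.h>
#include <linux/errno.h>
#include <linux/kernel.h>

static void demo_list_consoles(void)
{
	struct console *c;

	console_lock();			/* was acquire_console_sem() */
	for_each_console(c)
		printk(KERN_INFO "console: %s\n", c->name);
	console_unlock();		/* was release_console_sem() */
}

static int demo_try(void)
{
	if (!console_trylock())		/* note: now 1 on success, 0 on failure */
		return -EBUSY;
	/* ... exclusive access to the console system ... */
	console_unlock();
	return 0;
}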
diff --git a/kernel/sched.c b/kernel/sched.c
index ea3e5eff3878..18d38e4ec7ba 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@ struct rq {
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
-
-	/* BKL stats */
-	unsigned int bkl_count;
 #endif
 };
 
@@ -609,6 +606,9 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
+	if (p->flags & PF_EXITING)
+		return &root_task_group;
+
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
@@ -3887,7 +3887,7 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
 		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
@@ -4871,7 +4871,8 @@ recheck:
 	 * assigned.
 	 */
 	if (rt_bandwidth_enabled() && rt_policy(policy) &&
-			task_group(p)->rt_bandwidth.rt_runtime == 0) {
+			task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+			!task_group_is_autogroup(task_group(p))) {
 		__task_rq_unlock(rq);
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		return -EPERM;
@@ -8882,6 +8883,20 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	}
 }
 
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	/*
+	 * cgroup_exit() is called in the copy_process() failure path.
+	 * Ignore this case since the task hasn't ran yet, this avoids
+	 * trying to poke a half freed task state from generic code.
+	 */
+	if (!(task->flags & PF_EXITING))
+		return;
+
+	sched_move_task(task);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
@@ -8954,6 +8969,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.destroy	= cpu_cgroup_destroy,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 32a723b8f84c..9fb656283157 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -27,6 +27,11 @@ static inline void autogroup_destroy(struct kref *kref)
 {
 	struct autogroup *ag = container_of(kref, struct autogroup, kref);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* We've redirected RT tasks to the root task group... */
+	ag->tg->rt_se = NULL;
+	ag->tg->rt_rq = NULL;
+#endif
 	sched_destroy_group(ag->tg);
 }
 
@@ -55,6 +60,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
 	return ag;
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@ static inline struct autogroup *autogroup_create(void)
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
 	ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Autogroup RT tasks are redirected to the root task group
+	 * so we don't have to move tasks around upon policy change,
+	 * or flail around trying to allocate bandwidth on the fly.
+	 * A bandwidth exception in __sched_setscheduler() allows
+	 * the policy change to proceed. Thereafter, task_group()
+	 * returns &root_task_group, so zero bandwidth is required.
+	 */
+	free_rt_sched_group(tg);
+	tg->rt_se = root_task_group.rt_se;
+	tg->rt_rq = root_task_group.rt_rq;
+#endif
 	tg->autogroup = ag;
 
 	return ag;
@@ -106,6 +128,11 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	return true;
 }
 
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return tg != &root_task_group && tg->autogroup;
+}
+
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
@@ -231,6 +258,11 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
 #ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
+	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (!enabled || !tg->autogroup)
+		return 0;
+
 	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
 }
 #endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 5358e241cb20..7b859ffe5dad 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -15,6 +15,10 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg);
 
 static inline void autogroup_init(struct task_struct *init_task) { }
 static inline void autogroup_free(struct task_group *tg) { }
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return 0;
+}
 
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 1dfae3d014b5..eb6cb8edd075 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+static DEFINE_SPINLOCK(sched_debug_lock);
+
 /*
  * This allows printing both to /proc/sched_debug and
  * to the console
@@ -86,6 +88,26 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 }
 #endif
 
+#ifdef CONFIG_CGROUP_SCHED
+static char group_path[PATH_MAX];
+
+static char *task_group_path(struct task_group *tg)
+{
+	if (autogroup_path(tg, group_path, PATH_MAX))
+		return group_path;
+
+	/*
+	 * May be NULL if the underlying cgroup isn't fully-created yet
+	 */
+	if (!tg->css.cgroup) {
+		group_path[0] = '\0';
+		return group_path;
+	}
+	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	return group_path;
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -108,6 +130,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
+#ifdef CONFIG_CGROUP_SCHED
+	SEQ_printf(m, " %s", task_group_path(task_group(p)));
+#endif
 
 	SEQ_printf(m, "\n");
 }
@@ -144,7 +169,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	struct sched_entity *last;
 	unsigned long flags;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
+#else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#endif
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
@@ -191,7 +220,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_RT_GROUP_SCHED
+	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
+#else
 	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
+#endif
 
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
@@ -212,6 +245,7 @@ extern __read_mostly int sched_clock_running;
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
 #ifdef CONFIG_X86
 	{
@@ -262,14 +296,20 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(ttwu_count);
 	P(ttwu_local);
 
-	P(bkl_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
+		   rq->rq_sched_info.bkl_count);
 
 #undef P
+#undef P64
 #endif
+	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
+	rcu_read_lock();
 	print_rq(m, rq, cpu);
+	rcu_read_unlock();
+	spin_unlock_irqrestore(&sched_debug_lock, flags);
 }
 
 static const char *sched_tunable_scaling_names[] = {
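
task_group_path() formats into a single static PATH_MAX buffer (a 4 KB array would be too large for the kernel stack), so print_cpu() now serializes every caller behind sched_debug_lock. The general pattern, sketched with illustrative demo_* names:

#include <linux/cgroup.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(scratch_lock);
static char scratch[PATH_MAX];		/* too large to live on the stack */

static void demo_show(struct seq_file *m, struct cgroup *cgrp)
{
	unsigned long flags;

	/* IRQs off: the shared buffer may also be reached from atomic
	 * printing paths, and two writers would corrupt each other. */
	spin_lock_irqsave(&scratch_lock, flags);
	cgroup_path(cgrp, scratch, PATH_MAX);	/* fills the shared buffer */
	seq_printf(m, "%s\n", scratch);
	spin_unlock_irqrestore(&scratch_lock, flags);
}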
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae65cf0..354769979c02 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
@@ -762,6 +763,51 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+			    long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				   long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -782,7 +828,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
+	long shares;
 
 	if (!cfs_rq)
 		return;
@@ -791,32 +837,14 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1062,6 +1090,9 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		struct sched_entity *se = __pick_next_entity(cfs_rq);
 		s64 delta = curr->vruntime - se->vruntime;
 
+		if (delta < 0)
+			return;
+
 		if (delta > ideal_runtime)
 			resched_task(rq_of(cfs_rq)->curr);
 	}
@@ -1362,27 +1393,27 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
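
calc_cfs_shares() boils down to shares = tg->shares * load / (group-wide load_weight with this cfs_rq's stale contribution swapped for its instantaneous load), clamped to [MIN_SHARES, tg->shares]. A standalone userspace check of that arithmetic with made-up numbers:

#include <stdio.h>

#define MIN_SHARES 2

static long calc_shares(long tg_shares, long tg_load_weight,
			long cfs_load_contrib, long cfs_load, long delta)
{
	long load = cfs_load + delta;
	/* replace this rq's old contribution with its current load */
	long load_weight = tg_load_weight - cfs_load_contrib + load;
	long shares = tg_shares * load;

	if (load_weight)
		shares /= load_weight;
	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* group weight 1024, total group load 3072, this rq contributed
	 * 1024 of it and is about to gain a task of weight 1024:
	 * 1024 * 2048 / (3072 - 1024 + 2048) = 2097152 / 4096 = 512 */
	printf("%ld\n", calc_shares(1024, 3072, 1024, 1024, 1024));
	return 0;
}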
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e069987..9910744f0856 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@ void generic_smp_call_function_interrupt(void)
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
+			continue;
+
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
 			continue;
 
+		func = data->csd.func;		/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu. In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
-		if (!refs) {
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
 
@@ -430,7 +459,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * can't happen.
 	 */
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-		     && !oops_in_progress);
+		     && !oops_in_progress && !early_boot_irqs_disabled);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -454,11 +483,21 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets an complete view
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler. In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
@@ -533,17 +572,20 @@ void ipi_call_unlock_irq(void)
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /*
- * Call a function on all processors
+ * Call a function on all processors. May be used during early boot while
+ * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
 */
 int on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
+	unsigned long flags;
 	int ret = 0;
 
	preempt_disable();
 	ret = smp_call_function(func, info, wait);
-	local_irq_disable();
+	local_irq_save(flags);
 	func(info);
-	local_irq_enable();
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
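
The new comments describe a classic publish/consume ordering: the sender fills func/info/cpumask, issues smp_wmb(), then sets refs; the interrupt handler tests the cpumask, issues smp_rmb(), then reads refs. A userspace analog using C11 fences (illustrative only; the kernel barriers are not literally C11 fences, and this mirrors the fence placement rather than proving it):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpumask_bit;	/* stands in for data->cpumask */
static atomic_int refs;		/* stands in for data->refs    */

static void sender(void)
{
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&refs, 1, memory_order_relaxed);
}

static int handler(void)
{
	/* check the mask first, as the kernel comment requires */
	if (!atomic_load_explicit(&cpumask_bit, memory_order_relaxed))
		return 0;				/* skip entry */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	return atomic_load_explicit(&refs, memory_order_relaxed) != 0;
}

int main(void)
{
	sender();
	printf("entry ready: %d\n", handler());
	return 0;
}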
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index bc86bb32e126..0f1bd83db985 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -170,7 +170,8 @@ static int proc_taint(struct ctl_table *table, int write,
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static int __sysrq_enabled; /* Note: sysrq code ises it's own private copy */
+/* Note: sysrq code uses it's own private copy */
+static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 
 static int sysrq_sysctl_handler(ctl_table *table, int write,
 				void __user *buffer, size_t *lenp,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3e216e01bbd1..c55ea2433471 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,8 +642,7 @@ static void tick_nohz_switch_to_nohz(void)
 	}
 	local_irq_enable();
 
-	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
-	       smp_processor_id());
+	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -795,8 +794,10 @@ void tick_setup_sched_timer(void)
 	}
 
 #ifdef CONFIG_NO_HZ
-	if (tick_nohz_enabled)
+	if (tick_nohz_enabled) {
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
+		printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
+	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5cf8c602b880..92b6e1e12d98 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -453,14 +453,6 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
  * Stubs:
  */
 
-void early_boot_irqs_off(void)
-{
-}
-
-void early_boot_irqs_on(void)
-{
-}
-
 void trace_softirqs_on(unsigned long ip)
 {
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88a..11869faa6819 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running. Note
+	 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -1840,7 +1844,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2384,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock. Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
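
With this change a running work item holds the workqueue's lockdep pseudo-lock as a recursive read, while start_flush_work() takes it exclusively only when max_active == 1 or a rescuer is in use, i.e. exactly the cases where flushing from inside the same workqueue can deadlock. A sketch of the caller-side distinction (lock_map_acquire_read() is assumed to exist per this patch series):

#include <linux/lockdep.h>
#include <linux/types.h>

static void demo_flush_annotation(struct lockdep_map *map, bool may_deadlock)
{
	if (may_deadlock)
		lock_map_acquire(map);		/* exclusive: nesting is flagged */
	else
		lock_map_acquire_read(map);	/* recursive read: nesting is ok */
	lock_map_release(map);
}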