Diffstat (limited to 'kernel')
58 files changed, 3881 insertions, 2596 deletions
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 2251882daf53..44511d100eaa 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -87,6 +87,9 @@ config ARCH_INLINE_WRITE_UNLOCK_IRQ
 config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	bool
 
+config UNINLINE_SPIN_UNLOCK
+	bool
+
 #
 # lock_* functions are inlined when:
 #  - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
@@ -103,100 +106,120 @@ config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 #  - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
 #
 
+if !DEBUG_SPINLOCK
+
 config INLINE_SPIN_TRYLOCK
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
+	def_bool y
+	depends on ARCH_INLINE_SPIN_TRYLOCK
 
 config INLINE_SPIN_TRYLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_SPIN_TRYLOCK_BH
 
 config INLINE_SPIN_LOCK
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
 
 config INLINE_SPIN_LOCK_BH
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_SPIN_LOCK_BH
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_BH
 
 config INLINE_SPIN_LOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_SPIN_LOCK_IRQ
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQ
 
 config INLINE_SPIN_LOCK_IRQSAVE
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_SPIN_LOCK_IRQSAVE
-
-config UNINLINE_SPIN_UNLOCK
-	bool
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQSAVE
 
 config INLINE_SPIN_UNLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_SPIN_UNLOCK_BH
 
 config INLINE_SPIN_UNLOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH
 
 config INLINE_SPIN_UNLOCK_IRQRESTORE
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+	def_bool y
+	depends on ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
 
 
 config INLINE_READ_TRYLOCK
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
+	def_bool y
+	depends on ARCH_INLINE_READ_TRYLOCK
 
 config INLINE_READ_LOCK
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
 
 config INLINE_READ_LOCK_BH
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_READ_LOCK_BH
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_BH
 
 config INLINE_READ_LOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_READ_LOCK_IRQ
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQ
 
 config INLINE_READ_LOCK_IRQSAVE
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_READ_LOCK_IRQSAVE
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQSAVE
 
 config INLINE_READ_UNLOCK
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK
 
 config INLINE_READ_UNLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_READ_UNLOCK_BH
 
 config INLINE_READ_UNLOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_BH
 
 config INLINE_READ_UNLOCK_IRQRESTORE
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+	def_bool y
+	depends on ARCH_INLINE_READ_UNLOCK_IRQRESTORE
 
 
 config INLINE_WRITE_TRYLOCK
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
+	def_bool y
+	depends on ARCH_INLINE_WRITE_TRYLOCK
 
 config INLINE_WRITE_LOCK
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
 
 config INLINE_WRITE_LOCK_BH
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_WRITE_LOCK_BH
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_BH
 
 config INLINE_WRITE_LOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_WRITE_LOCK_IRQ
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQ
 
 config INLINE_WRITE_LOCK_IRQSAVE
-	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
-		 ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	def_bool y
+	depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQSAVE
 
 config INLINE_WRITE_UNLOCK
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK
 
 config INLINE_WRITE_UNLOCK_BH
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
+	def_bool y
+	depends on ARCH_INLINE_WRITE_UNLOCK_BH
 
 config INLINE_WRITE_UNLOCK_IRQ
-	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
+	def_bool y
+	depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH
 
 config INLINE_WRITE_UNLOCK_IRQRESTORE
-	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+	def_bool y
+	depends on ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+endif
 
 config MUTEX_SPIN_ON_OWNER
-	def_bool SMP && !DEBUG_MUTEXES
+	def_bool y
+	depends on SMP && !DEBUG_MUTEXES
diff --git a/kernel/Makefile b/kernel/Makefile
index c0cc67ad764c..5404911eaee9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o cred.o \
-	    async.o range.o groups.o lglock.o
+	    async.o range.o groups.o lglock.o smpboot.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -46,7 +46,6 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SMP) += smpboot.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
@@ -98,7 +97,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
 obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
-obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_TRACE_CLOCK) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 14d32588cccd..f560598807c1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -280,12 +280,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 			__func__, cpu);
 		goto out_release;
 	}
+	smpboot_park_threads(cpu);
 
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
+		smpboot_unpark_threads(cpu);
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
-
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -354,6 +355,10 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out;
 	}
 
+	ret = smpboot_create_threads(cpu);
+	if (ret)
+		goto out;
+
 	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (ret) {
 		nr_calls--;
@@ -368,6 +373,9 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
+	/* Wake the per cpu threads */
+	smpboot_unpark_threads(cpu);
+
 	/* Now call notifier in preparation. */
 	cpu_notify(CPU_ONLINE | mod, hcpu);
 
@@ -439,14 +447,6 @@ EXPORT_SYMBOL_GPL(cpu_up);
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
 
-void __weak arch_disable_nonboot_cpus_begin(void)
-{
-}
-
-void __weak arch_disable_nonboot_cpus_end(void)
-{
-}
-
 int disable_nonboot_cpus(void)
 {
 	int cpu, first_cpu, error = 0;
@@ -458,7 +458,6 @@ int disable_nonboot_cpus(void)
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
-	arch_disable_nonboot_cpus_begin();
 
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
@@ -474,8 +473,6 @@ int disable_nonboot_cpus(void)
 		}
 	}
 
-	arch_disable_nonboot_cpus_end();
-
 	if (!error) {
 		BUG_ON(num_online_cpus() > 1);
 		/* Make sure the CPUs won't be enabled by someone else */
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 98d4597f43d6..c77206184b8b 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -159,6 +159,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	int rctx;
 	struct perf_callchain_entry *entry;
 
+	int kernel = !event->attr.exclude_callchain_kernel;
+	int user   = !event->attr.exclude_callchain_user;
+
+	if (!kernel && !user)
+		return NULL;
 
 	entry = get_callchain_entry(&rctx);
 	if (rctx == -1)
@@ -169,24 +174,29 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 
 	entry->nr = 0;
 
-	if (!user_mode(regs)) {
+	if (kernel && !user_mode(regs)) {
 		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 		perf_callchain_kernel(entry, regs);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
 	}
 
-	if (regs) {
-		/*
-		 * Disallow cross-task user callchains.
-		 */
-		if (event->ctx->task && event->ctx->task != current)
-			goto exit_put;
-
-		perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
+	if (user) {
+		if (!user_mode(regs)) {
+			if (current->mm)
+				regs = task_pt_regs(current);
+			else
+				regs = NULL;
+		}
+
+		if (regs) {
+			/*
+			 * Disallow cross-task user callchains.
+			 */
+			if (event->ctx->task && event->ctx->task != current)
+				goto exit_put;
+
+			perf_callchain_store(entry, PERF_CONTEXT_USER);
+			perf_callchain_user(entry, regs);
+		}
 	}
 
 exit_put:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b7935fcec7d9..7b9df353ba1b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -36,6 +36,7 @@
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/mm_types.h>
 
 #include "internal.h"
 
@@ -1253,7 +1254,7 @@ retry:
 /*
  * Cross CPU call to disable a performance event
  */
-static int __perf_event_disable(void *info)
+int __perf_event_disable(void *info)
 {
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
@@ -2935,12 +2936,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static int perf_release(struct inode *inode, struct file *file)
+static void put_event(struct perf_event *event)
 {
-	struct perf_event *event = file->private_data;
 	struct task_struct *owner;
 
-	file->private_data = NULL;
+	if (!atomic_long_dec_and_test(&event->refcount))
+		return;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(event->owner);
@@ -2975,7 +2976,13 @@ static int perf_release(struct inode *inode, struct file *file)
 		put_task_struct(owner);
 	}
 
-	return perf_event_release_kernel(event);
+	perf_event_release_kernel(event);
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+	put_event(file->private_data);
+	return 0;
 }
 
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
@@ -3227,7 +3234,7 @@ unlock:
 
 static const struct file_operations perf_fops;
 
-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+static struct file *perf_fget_light(int fd, int *fput_needed)
 {
 	struct file *file;
 
@@ -3241,7 +3248,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
 		return ERR_PTR(-EBADF);
 	}
 
-	return file->private_data;
+	return file;
 }
 
 static int perf_event_set_output(struct perf_event *event,
@@ -3273,19 +3280,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	case PERF_EVENT_IOC_SET_OUTPUT:
 	{
+		struct file *output_file = NULL;
 		struct perf_event *output_event = NULL;
 		int fput_needed = 0;
 		int ret;
 
 		if (arg != -1) {
-			output_event = perf_fget_light(arg, &fput_needed);
-			if (IS_ERR(output_event))
-				return PTR_ERR(output_event);
+			output_file = perf_fget_light(arg, &fput_needed);
+			if (IS_ERR(output_file))
+				return PTR_ERR(output_file);
+			output_event = output_file->private_data;
 		}
 
 		ret = perf_event_set_output(event, output_event);
 		if (output_event)
-			fput_light(output_event->filp, fput_needed);
+			fput_light(output_file, fput_needed);
 
 		return ret;
 	}
@@ -3756,6 +3765,132 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
 }
 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
 
+static void
+perf_output_sample_regs(struct perf_output_handle *handle,
+			struct pt_regs *regs, u64 mask)
+{
+	int bit;
+
+	for_each_set_bit(bit, (const unsigned long *) &mask,
+			 sizeof(mask) * BITS_PER_BYTE) {
+		u64 val;
+
+		val = perf_reg_value(regs, bit);
+		perf_output_put(handle, val);
+	}
+}
+
+static void perf_sample_regs_user(struct perf_regs_user *regs_user,
+				  struct pt_regs *regs)
+{
+	if (!user_mode(regs)) {
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+
+	if (regs) {
+		regs_user->regs = regs;
+		regs_user->abi  = perf_reg_abi(current);
+	}
+}
+
+/*
+ * Get remaining task size from user stack pointer.
+ *
+ * It'd be better to take stack vma map and limit this more
+ * precisly, but there's no way to get it safely under interrupt,
+ * so using TASK_SIZE as limit.
+ */
+static u64 perf_ustack_task_size(struct pt_regs *regs)
+{
+	unsigned long addr = perf_user_stack_pointer(regs);
+
+	if (!addr || addr >= TASK_SIZE)
+		return 0;
+
+	return TASK_SIZE - addr;
+}
+
+static u16
+perf_sample_ustack_size(u16 stack_size, u16 header_size,
+			struct pt_regs *regs)
+{
+	u64 task_size;
+
+	/* No regs, no stack pointer, no dump. */
+	if (!regs)
+		return 0;
+
+	/*
+	 * Check if we fit in with the requested stack size into the:
+	 * - TASK_SIZE
+	 *   If we don't, we limit the size to the TASK_SIZE.
+	 *
+	 * - remaining sample size
+	 *   If we don't, we customize the stack size to
+	 *   fit in to the remaining sample size.
+	 */
+
+	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
+	stack_size = min(stack_size, (u16) task_size);
+
+	/* Current header size plus static size and dynamic size. */
+	header_size += 2 * sizeof(u64);
+
+	/* Do we fit in with the current stack dump size? */
+	if ((u16) (header_size + stack_size) < header_size) {
+		/*
+		 * If we overflow the maximum size for the sample,
+		 * we customize the stack dump size to fit in.
+		 */
+		stack_size = USHRT_MAX - header_size - sizeof(u64);
+		stack_size = round_up(stack_size, sizeof(u64));
+	}
+
+	return stack_size;
+}
+
+static void
+perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+			  struct pt_regs *regs)
+{
+	/* Case of a kernel thread, nothing to dump */
+	if (!regs) {
+		u64 size = 0;
+		perf_output_put(handle, size);
+	} else {
+		unsigned long sp;
+		unsigned int rem;
+		u64 dyn_size;
+
+		/*
+		 * We dump:
+		 * static size
+		 *   - the size requested by user or the best one we can fit
+		 *     in to the sample max size
+		 * data
+		 *   - user stack dump data
+		 * dynamic size
+		 *   - the actual dumped size
+		 */
+
+		/* Static size. */
+		perf_output_put(handle, dump_size);
+
+		/* Data. */
+		sp = perf_user_stack_pointer(regs);
+		rem = __output_copy_user(handle, (void *) sp, dump_size);
+		dyn_size = dump_size - rem;
+
+		perf_output_skip(handle, rem);
+
+		/* Dynamic size. */
+		perf_output_put(handle, dyn_size);
+	}
+}
+
 static void __perf_event_header__init_id(struct perf_event_header *header,
 					 struct perf_sample_data *data,
 					 struct perf_event *event)
@@ -4016,6 +4151,28 @@ void perf_output_sample(struct perf_output_handle *handle,
 			perf_output_put(handle, nr);
 		}
 	}
+
+	if (sample_type & PERF_SAMPLE_REGS_USER) {
+		u64 abi = data->regs_user.abi;
+
+		/*
+		 * If there are no regs to dump, notice it through
+		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
+		 */
+		perf_output_put(handle, abi);
+
+		if (abi) {
+			u64 mask = event->attr.sample_regs_user;
+			perf_output_sample_regs(handle,
+						data->regs_user.regs,
+						mask);
+		}
+	}
+
+	if (sample_type & PERF_SAMPLE_STACK_USER)
+		perf_output_sample_ustack(handle,
+					  data->stack_user_size,
+					  data->regs_user.regs);
 }
 
 void perf_prepare_sample(struct perf_event_header *header,
@@ -4067,6 +4224,49 @@ void perf_prepare_sample(struct perf_event_header *header,
 		}
 		header->size += size;
 	}
+
+	if (sample_type & PERF_SAMPLE_REGS_USER) {
+		/* regs dump ABI info */
+		int size = sizeof(u64);
+
+		perf_sample_regs_user(&data->regs_user, regs);
+
+		if (data->regs_user.regs) {
+			u64 mask = event->attr.sample_regs_user;
+			size += hweight64(mask) * sizeof(u64);
+		}
+
+		header->size += size;
+	}
+
+	if (sample_type & PERF_SAMPLE_STACK_USER) {
+		/*
+		 * Either we need PERF_SAMPLE_STACK_USER bit to be allways
+		 * processed as the last one or have additional check added
+		 * in case new sample type is added, because we could eat
+		 * up the rest of the sample size.
+		 */
+		struct perf_regs_user *uregs = &data->regs_user;
+		u16 stack_size = event->attr.sample_stack_user;
+		u16 size = sizeof(u64);
+
+		if (!uregs->abi)
+			perf_sample_regs_user(uregs, regs);
+
+		stack_size = perf_sample_ustack_size(stack_size, header->size,
+						     uregs->regs);
+
+		/*
+		 * If there is something to dump, add space for the dump
+		 * itself and for the field that tells the dynamic size,
+		 * which is how many have been actually dumped.
+		 */
+		if (stack_size)
+			size += sizeof(u64) + stack_size;
+
+		data->stack_user_size = stack_size;
+		header->size += size;
+	}
 }
 
 static void perf_event_output(struct perf_event *event,
@@ -5950,6 +6150,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	mutex_init(&event->mmap_mutex);
 
+	atomic_long_set(&event->refcount, 1);
 	event->cpu		= cpu;
 	event->attr		= *attr;
 	event->group_leader	= group_leader;
@@ -6142,6 +6343,28 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 			attr->branch_sample_type = mask;
 		}
 	}
+
+	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
+		ret = perf_reg_validate(attr->sample_regs_user);
+		if (ret)
+			return ret;
+	}
+
+	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
+		if (!arch_perf_have_user_stack_dump())
+			return -ENOSYS;
+
+		/*
+		 * We have __u32 type for the size, but so far
+		 * we can only use __u16 as maximum due to the
+		 * __u16 sample size limit.
+		 */
+		if (attr->sample_stack_user >= USHRT_MAX)
+			ret = -EINVAL;
+		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+			ret = -EINVAL;
+	}
+
 out:
 	return ret;
 
@@ -6260,12 +6483,12 @@ SYSCALL_DEFINE5(perf_event_open,
 		return event_fd;
 
 	if (group_fd != -1) {
-		group_leader = perf_fget_light(group_fd, &fput_needed);
-		if (IS_ERR(group_leader)) {
-			err = PTR_ERR(group_leader);
+		group_file = perf_fget_light(group_fd, &fput_needed);
+		if (IS_ERR(group_file)) {
+			err = PTR_ERR(group_file);
 			goto err_fd;
 		}
-		group_file = group_leader->filp;
+		group_leader = group_file->private_data;
 		if (flags & PERF_FLAG_FD_OUTPUT)
 			output_event = group_leader;
 		if (flags & PERF_FLAG_FD_NO_GROUP)
@@ -6402,7 +6625,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		put_ctx(gctx);
 	}
 
-	event->filp = event_file;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 
@@ -6496,7 +6718,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_free;
 	}
 
-	event->filp = NULL;
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_install_in_context(ctx, event, cpu);
@@ -6578,7 +6799,7 @@ static void sync_child_event(struct perf_event *child_event,
 	 * Release the parent event, if this was the last
 	 * reference to it.
 	 */
-	fput(parent_event->filp);
+	put_event(parent_event);
 }
 
 static void
@@ -6654,9 +6875,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 *
 	 *   __perf_event_exit_task()
	 *     sync_child_event()
-	 *       fput(parent_event->filp)
-	 *         perf_release()
-	 *           mutex_lock(&ctx->mutex)
+	 *       put_event()
+	 *         mutex_lock(&ctx->mutex)
 	 *
 	 * But since its the parent context it won't be the same instance.
 	 */
@@ -6724,7 +6944,7 @@ static void perf_free_event(struct perf_event *event,
 	list_del_init(&event->child_list);
 	mutex_unlock(&parent->child_mutex);
 
-	fput(parent->filp);
+	put_event(parent);
 
 	perf_group_detach(event);
 	list_del_event(event, ctx);
@@ -6804,6 +7024,12 @@ inherit_event(struct perf_event *parent_event,
 					   NULL, NULL);
 	if (IS_ERR(child_event))
 		return child_event;
+
+	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+		free_event(child_event);
+		return NULL;
+	}
+
 	get_ctx(child_ctx);
 
 	/*
@@ -6845,14 +7071,6 @@ inherit_event(struct perf_event *parent_event,
 	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
-	 * Get a reference to the parent filp - we will fput it
-	 * when the child event exits. This is safe to do because
-	 * we are in the parent and we know that the filp still
-	 * exists and has a nonzero count:
-	 */
-	atomic_long_inc(&parent_event->filp->f_count);
-
-	/*
 	 * Link this into the parent event's child list
 	 */
 	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index bb38c4d3ee12..9a7b487c6fe2 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -453,7 +453,16 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 	int old_type = bp->attr.bp_type;
 	int err = 0;
 
-	perf_event_disable(bp);
+	/*
+	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
+	 * will not be possible to raise IPIs that invoke __perf_event_disable.
+	 * So call the function directly after making sure we are targeting the
+	 * current task.
+	 */
+	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
+		__perf_event_disable(bp);
+	else
+		perf_event_disable(bp);
 
 	bp->attr.bp_addr = attr->bp_addr;
 	bp->attr.bp_type = attr->bp_type;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index a096c19f2c2a..d56a64c99a8b 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -2,6 +2,7 @@
 #define _KERNEL_EVENTS_INTERNAL_H
 
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 
 /* Buffer handling */
 
@@ -76,30 +77,53 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
 }
 
-static inline void
-__output_copy(struct perf_output_handle *handle,
-		   const void *buf, unsigned int len)
+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
+static inline unsigned int						\
+func_name(struct perf_output_handle *handle,				\
+	  const void *buf, unsigned int len)				\
+{									\
+	unsigned long size, written;					\
+									\
+	do {								\
+		size = min_t(unsigned long, handle->size, len);		\
+									\
+		written = memcpy_func(handle->addr, buf, size);		\
+									\
+		len -= written;						\
+		handle->addr += written;				\
+		buf += written;						\
+		handle->size -= written;				\
+		if (!handle->size) {					\
+			struct ring_buffer *rb = handle->rb;		\
+									\
+			handle->page++;					\
+			handle->page &= rb->nr_pages - 1;		\
+			handle->addr = rb->data_pages[handle->page];	\
+			handle->size = PAGE_SIZE << page_order(rb);	\
+		}							\
+	} while (len && written == size);				\
+									\
+	return len;							\
+}
+
+static inline int memcpy_common(void *dst, const void *src, size_t n)
 {
-	do {
-		unsigned long size = min_t(unsigned long, handle->size, len);
-
-		memcpy(handle->addr, buf, size);
-
-		len -= size;
-		handle->addr += size;
-		buf += size;
-		handle->size -= size;
-		if (!handle->size) {
-			struct ring_buffer *rb = handle->rb;
-
-			handle->page++;
-			handle->page &= rb->nr_pages - 1;
-			handle->addr = rb->data_pages[handle->page];
-			handle->size = PAGE_SIZE << page_order(rb);
-		}
-	} while (len);
+	memcpy(dst, src, n);
+	return n;
 }
 
+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
+
+#define MEMCPY_SKIP(dst, src, n) (n)
+
+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
+
+#ifndef arch_perf_out_copy_user
+#define arch_perf_out_copy_user __copy_from_user_inatomic
+#endif
+
+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
+
 /* Callchain handling */
 extern struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs);
@@ -134,4 +158,20 @@ static inline void put_recursion_context(int *recursion, int rctx)
 	recursion[rctx]--;
 }
 
+#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
+static inline bool arch_perf_have_user_stack_dump(void)
+{
+	return true;
+}
+
+#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
+#else
+static inline bool arch_perf_have_user_stack_dump(void)
+{
+	return false;
+}
+
+#define perf_user_stack_pointer(regs) 0
+#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
+
 #endif /* _KERNEL_EVENTS_INTERNAL_H */
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 6ddaba43fb7a..23cb34ff3973 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -182,10 +182,16 @@ out:
 	return -ENOSPC;
 }
 
-void perf_output_copy(struct perf_output_handle *handle,
+unsigned int perf_output_copy(struct perf_output_handle *handle,
 		      const void *buf, unsigned int len)
 {
-	__output_copy(handle, buf, len);
+	return __output_copy(handle, buf, len);
+}
+
+unsigned int perf_output_skip(struct perf_output_handle *handle,
+			      unsigned int len)
+{
+	return __output_skip(handle, NULL, len);
 }
 
 void perf_output_end(struct perf_output_handle *handle)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c08a22d02f72..912ef48d28ab 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -280,12 +280,10 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_ | |||
280 | if (ret <= 0) | 280 | if (ret <= 0) |
281 | return ret; | 281 | return ret; |
282 | 282 | ||
283 | lock_page(page); | ||
284 | vaddr_new = kmap_atomic(page); | 283 | vaddr_new = kmap_atomic(page); |
285 | vaddr &= ~PAGE_MASK; | 284 | vaddr &= ~PAGE_MASK; |
286 | memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE); | 285 | memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE); |
287 | kunmap_atomic(vaddr_new); | 286 | kunmap_atomic(vaddr_new); |
288 | unlock_page(page); | ||
289 | 287 | ||
290 | put_page(page); | 288 | put_page(page); |
291 | 289 | ||
@@ -334,7 +332,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned | |||
334 | */ | 332 | */ |
335 | result = is_swbp_at_addr(mm, vaddr); | 333 | result = is_swbp_at_addr(mm, vaddr); |
336 | if (result == 1) | 334 | if (result == 1) |
337 | return -EEXIST; | 335 | return 0; |
338 | 336 | ||
339 | if (result) | 337 | if (result) |
340 | return result; | 338 | return result; |
@@ -347,24 +345,22 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned | |||
347 | * @mm: the probed process address space. | 345 | * @mm: the probed process address space. |
348 | * @auprobe: arch specific probepoint information. | 346 | * @auprobe: arch specific probepoint information. |
349 | * @vaddr: the virtual address to insert the opcode. | 347 | * @vaddr: the virtual address to insert the opcode. |
350 | * @verify: if true, verify existance of breakpoint instruction. | ||
351 | * | 348 | * |
352 | * For mm @mm, restore the original opcode (opcode) at @vaddr. | 349 | * For mm @mm, restore the original opcode (opcode) at @vaddr. |
353 | * Return 0 (success) or a negative errno. | 350 | * Return 0 (success) or a negative errno. |
354 | */ | 351 | */ |
355 | int __weak | 352 | int __weak |
356 | set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify) | 353 | set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) |
357 | { | 354 | { |
358 | if (verify) { | 355 | int result; |
359 | int result; | 356 | |
357 | result = is_swbp_at_addr(mm, vaddr); | ||
358 | if (!result) | ||
359 | return -EINVAL; | ||
360 | 360 | ||
361 | result = is_swbp_at_addr(mm, vaddr); | 361 | if (result != 1) |
362 | if (!result) | 362 | return result; |
363 | return -EINVAL; | ||
364 | 363 | ||
365 | if (result != 1) | ||
366 | return result; | ||
367 | } | ||
368 | return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn); | 364 | return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn); |
369 | } | 365 | } |
370 | 366 | ||
@@ -415,11 +411,10 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset) | |||
415 | static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) | 411 | static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) |
416 | { | 412 | { |
417 | struct uprobe *uprobe; | 413 | struct uprobe *uprobe; |
418 | unsigned long flags; | ||
419 | 414 | ||
420 | spin_lock_irqsave(&uprobes_treelock, flags); | 415 | spin_lock(&uprobes_treelock); |
421 | uprobe = __find_uprobe(inode, offset); | 416 | uprobe = __find_uprobe(inode, offset); |
422 | spin_unlock_irqrestore(&uprobes_treelock, flags); | 417 | spin_unlock(&uprobes_treelock); |
423 | 418 | ||
424 | return uprobe; | 419 | return uprobe; |
425 | } | 420 | } |
@@ -466,12 +461,11 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe) | |||
466 | */ | 461 | */ |
467 | static struct uprobe *insert_uprobe(struct uprobe *uprobe) | 462 | static struct uprobe *insert_uprobe(struct uprobe *uprobe) |
468 | { | 463 | { |
469 | unsigned long flags; | ||
470 | struct uprobe *u; | 464 | struct uprobe *u; |
471 | 465 | ||
472 | spin_lock_irqsave(&uprobes_treelock, flags); | 466 | spin_lock(&uprobes_treelock); |
473 | u = __insert_uprobe(uprobe); | 467 | u = __insert_uprobe(uprobe); |
474 | spin_unlock_irqrestore(&uprobes_treelock, flags); | 468 | spin_unlock(&uprobes_treelock); |
475 | 469 | ||
476 | /* For now assume that the instruction need not be single-stepped */ | 470 | /* For now assume that the instruction need not be single-stepped */ |
477 | uprobe->flags |= UPROBE_SKIP_SSTEP; | 471 | uprobe->flags |= UPROBE_SKIP_SSTEP; |
@@ -649,6 +643,7 @@ static int | |||
649 | install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, | 643 | install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, |
650 | struct vm_area_struct *vma, unsigned long vaddr) | 644 | struct vm_area_struct *vma, unsigned long vaddr) |
651 | { | 645 | { |
646 | bool first_uprobe; | ||
652 | int ret; | 647 | int ret; |
653 | 648 | ||
654 | /* | 649 | /* |
@@ -659,7 +654,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, | |||
659 | * Hence behave as if probe already existed. | 654 | * Hence behave as if probe already existed. |
660 | */ | 655 | */ |
661 | if (!uprobe->consumers) | 656 | if (!uprobe->consumers) |
662 | return -EEXIST; | 657 | return 0; |
663 | 658 | ||
664 | if (!(uprobe->flags & UPROBE_COPY_INSN)) { | 659 | if (!(uprobe->flags & UPROBE_COPY_INSN)) { |
665 | ret = copy_insn(uprobe, vma->vm_file); | 660 | ret = copy_insn(uprobe, vma->vm_file); |
@@ -681,17 +676,18 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, | |||
681 | } | 676 | } |
682 | 677 | ||
683 | /* | 678 | /* |
684 | * Ideally, should be updating the probe count after the breakpoint | 679 | * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), |
685 | * has been successfully inserted. However a thread could hit the | 680 | * the task can hit this breakpoint right after __replace_page(). |
686 | * breakpoint we just inserted even before the probe count is | ||
687 | * incremented. If this is the first breakpoint placed, breakpoint | ||
688 | * notifier might ignore uprobes and pass the trap to the thread. | ||
689 | * Hence increment before and decrement on failure. | ||
690 | */ | 681 | */ |
691 | atomic_inc(&mm->uprobes_state.count); | 682 | first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); |
683 | if (first_uprobe) | ||
684 | set_bit(MMF_HAS_UPROBES, &mm->flags); | ||
685 | |||
692 | ret = set_swbp(&uprobe->arch, mm, vaddr); | 686 | ret = set_swbp(&uprobe->arch, mm, vaddr); |
693 | if (ret) | 687 | if (!ret) |
694 | atomic_dec(&mm->uprobes_state.count); | 688 | clear_bit(MMF_RECALC_UPROBES, &mm->flags); |
689 | else if (first_uprobe) | ||
690 | clear_bit(MMF_HAS_UPROBES, &mm->flags); | ||
695 | 691 | ||
696 | return ret; | 692 | return ret; |
697 | } | 693 | } |
@@ -699,8 +695,12 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, | |||
699 | static void | 695 | static void |
700 | remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) | 696 | remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) |
701 | { | 697 | { |
702 | if (!set_orig_insn(&uprobe->arch, mm, vaddr, true)) | 698 | /* can happen if uprobe_register() fails */ |
703 | atomic_dec(&mm->uprobes_state.count); | 699 | if (!test_bit(MMF_HAS_UPROBES, &mm->flags)) |
700 | return; | ||
701 | |||
702 | set_bit(MMF_RECALC_UPROBES, &mm->flags); | ||
703 | set_orig_insn(&uprobe->arch, mm, vaddr); | ||
704 | } | 704 | } |
705 | 705 | ||
706 | /* | 706 | /* |
@@ -710,11 +710,9 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad | |||
710 | */ | 710 | */ |
711 | static void delete_uprobe(struct uprobe *uprobe) | 711 | static void delete_uprobe(struct uprobe *uprobe) |
712 | { | 712 | { |
713 | unsigned long flags; | 713 | spin_lock(&uprobes_treelock); |
714 | |||
715 | spin_lock_irqsave(&uprobes_treelock, flags); | ||
716 | rb_erase(&uprobe->rb_node, &uprobes_tree); | 714 | rb_erase(&uprobe->rb_node, &uprobes_tree); |
717 | spin_unlock_irqrestore(&uprobes_treelock, flags); | 715 | spin_unlock(&uprobes_treelock); |
718 | iput(uprobe->inode); | 716 | iput(uprobe->inode); |
719 | put_uprobe(uprobe); | 717 | put_uprobe(uprobe); |
720 | atomic_dec(&uprobe_events); | 718 | atomic_dec(&uprobe_events); |
@@ -831,17 +829,11 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) | |||
831 | vaddr_to_offset(vma, info->vaddr) != uprobe->offset) | 829 | vaddr_to_offset(vma, info->vaddr) != uprobe->offset) |
832 | goto unlock; | 830 | goto unlock; |
833 | 831 | ||
834 | if (is_register) { | 832 | if (is_register) |
835 | err = install_breakpoint(uprobe, mm, vma, info->vaddr); | 833 | err = install_breakpoint(uprobe, mm, vma, info->vaddr); |
836 | /* | 834 | else |
837 | * We can race against uprobe_mmap(), see the | ||
838 | * comment near uprobe_hash(). | ||
839 | */ | ||
840 | if (err == -EEXIST) | ||
841 | err = 0; | ||
842 | } else { | ||
843 | remove_breakpoint(uprobe, mm, info->vaddr); | 835 | remove_breakpoint(uprobe, mm, info->vaddr); |
844 | } | 836 | |
845 | unlock: | 837 | unlock: |
846 | up_write(&mm->mmap_sem); | 838 | up_write(&mm->mmap_sem); |
847 | free: | 839 | free: |
@@ -908,7 +900,8 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * | |||
908 | } | 900 | } |
909 | 901 | ||
910 | mutex_unlock(uprobes_hash(inode)); | 902 | mutex_unlock(uprobes_hash(inode)); |
911 | put_uprobe(uprobe); | 903 | if (uprobe) |
904 | put_uprobe(uprobe); | ||
912 | 905 | ||
913 | return ret; | 906 | return ret; |
914 | } | 907 | } |
@@ -978,7 +971,6 @@ static void build_probe_list(struct inode *inode, | |||
978 | struct list_head *head) | 971 | struct list_head *head) |
979 | { | 972 | { |
980 | loff_t min, max; | 973 | loff_t min, max; |
981 | unsigned long flags; | ||
982 | struct rb_node *n, *t; | 974 | struct rb_node *n, *t; |
983 | struct uprobe *u; | 975 | struct uprobe *u; |
984 | 976 | ||
@@ -986,7 +978,7 @@ static void build_probe_list(struct inode *inode, | |||
986 | min = vaddr_to_offset(vma, start); | 978 | min = vaddr_to_offset(vma, start); |
987 | max = min + (end - start) - 1; | 979 | max = min + (end - start) - 1; |
988 | 980 | ||
989 | spin_lock_irqsave(&uprobes_treelock, flags); | 981 | spin_lock(&uprobes_treelock); |
990 | n = find_node_in_range(inode, min, max); | 982 | n = find_node_in_range(inode, min, max); |
991 | if (n) { | 983 | if (n) { |
992 | for (t = n; t; t = rb_prev(t)) { | 984 | for (t = n; t; t = rb_prev(t)) { |
@@ -1004,27 +996,20 @@ static void build_probe_list(struct inode *inode, | |||
1004 | atomic_inc(&u->ref); | 996 | atomic_inc(&u->ref); |
1005 | } | 997 | } |
1006 | } | 998 | } |
1007 | spin_unlock_irqrestore(&uprobes_treelock, flags); | 999 | spin_unlock(&uprobes_treelock); |
1008 | } | 1000 | } |
1009 | 1001 | ||
1010 | /* | 1002 | /* |
1011 | * Called from mmap_region. | 1003 | * Called from mmap_region/vma_adjust with mm->mmap_sem acquired. |
1012 | * called with mm->mmap_sem acquired. | ||
1013 | * | 1004 | * |
1014 | * Return -ve no if we fail to insert probes and we cannot | 1005 | * Currently we ignore all errors and always return 0, the callers |
1015 | * bail-out. | 1006 | * can't handle the failure anyway. |
1016 | * Return 0 otherwise. i.e: | ||
1017 | * | ||
1018 | * - successful insertion of probes | ||
1019 | * - (or) no possible probes to be inserted. | ||
1020 | * - (or) insertion of probes failed but we can bail-out. | ||
1021 | */ | 1007 | */ |
1022 | int uprobe_mmap(struct vm_area_struct *vma) | 1008 | int uprobe_mmap(struct vm_area_struct *vma) |
1023 | { | 1009 | { |
1024 | struct list_head tmp_list; | 1010 | struct list_head tmp_list; |
1025 | struct uprobe *uprobe, *u; | 1011 | struct uprobe *uprobe, *u; |
1026 | struct inode *inode; | 1012 | struct inode *inode; |
1027 | int ret, count; | ||
1028 | 1013 | ||
1029 | if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) | 1014 | if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) |
1030 | return 0; | 1015 | return 0; |
@@ -1036,44 +1021,35 @@ int uprobe_mmap(struct vm_area_struct *vma) | |||
1036 | mutex_lock(uprobes_mmap_hash(inode)); | 1021 | mutex_lock(uprobes_mmap_hash(inode)); |
1037 | build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); | 1022 | build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); |
1038 | 1023 | ||
1039 | ret = 0; | ||
1040 | count = 0; | ||
1041 | |||
1042 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { | 1024 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { |
1043 | if (!ret) { | 1025 | if (!fatal_signal_pending(current)) { |
1044 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); | 1026 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); |
1045 | 1027 | install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); | |
1046 | ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); | ||
1047 | /* | ||
1048 | * We can race against uprobe_register(), see the | ||
1049 | * comment near uprobe_hash(). | ||
1050 | */ | ||
1051 | if (ret == -EEXIST) { | ||
1052 | ret = 0; | ||
1053 | |||
1054 | if (!is_swbp_at_addr(vma->vm_mm, vaddr)) | ||
1055 | continue; | ||
1056 | |||
1057 | /* | ||
1058 | * Unable to insert a breakpoint, but | ||
1059 | * breakpoint lies underneath. Increment the | ||
1060 | * probe count. | ||
1061 | */ | ||
1062 | atomic_inc(&vma->vm_mm->uprobes_state.count); | ||
1063 | } | ||
1064 | |||
1065 | if (!ret) | ||
1066 | count++; | ||
1067 | } | 1028 | } |
1068 | put_uprobe(uprobe); | 1029 | put_uprobe(uprobe); |
1069 | } | 1030 | } |
1070 | |||
1071 | mutex_unlock(uprobes_mmap_hash(inode)); | 1031 | mutex_unlock(uprobes_mmap_hash(inode)); |
1072 | 1032 | ||
1073 | if (ret) | 1033 | return 0; |
1074 | atomic_sub(count, &vma->vm_mm->uprobes_state.count); | 1034 | } |
1075 | 1035 | ||
1076 | return ret; | 1036 | static bool |
1037 | vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
1038 | { | ||
1039 | loff_t min, max; | ||
1040 | struct inode *inode; | ||
1041 | struct rb_node *n; | ||
1042 | |||
1043 | inode = vma->vm_file->f_mapping->host; | ||
1044 | |||
1045 | min = vaddr_to_offset(vma, start); | ||
1046 | max = min + (end - start) - 1; | ||
1047 | |||
1048 | spin_lock(&uprobes_treelock); | ||
1049 | n = find_node_in_range(inode, min, max); | ||
1050 | spin_unlock(&uprobes_treelock); | ||
1051 | |||
1052 | return !!n; | ||
1077 | } | 1053 | } |
1078 | 1054 | ||
1079 | /* | 1055 | /* |
@@ -1081,37 +1057,18 @@ int uprobe_mmap(struct vm_area_struct *vma) | |||
1081 | */ | 1057 | */ |
1082 | void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) | 1058 | void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
1083 | { | 1059 | { |
1084 | struct list_head tmp_list; | ||
1085 | struct uprobe *uprobe, *u; | ||
1086 | struct inode *inode; | ||
1087 | |||
1088 | if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) | 1060 | if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) |
1089 | return; | 1061 | return; |
1090 | 1062 | ||
1091 | if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ | 1063 | if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ |
1092 | return; | 1064 | return; |
1093 | 1065 | ||
1094 | if (!atomic_read(&vma->vm_mm->uprobes_state.count)) | 1066 | if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || |
1095 | return; | 1067 | test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) |
1096 | |||
1097 | inode = vma->vm_file->f_mapping->host; | ||
1098 | if (!inode) | ||
1099 | return; | 1068 | return; |
1100 | 1069 | ||
1101 | mutex_lock(uprobes_mmap_hash(inode)); | 1070 | if (vma_has_uprobes(vma, start, end)) |
1102 | build_probe_list(inode, vma, start, end, &tmp_list); | 1071 | set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); |
1103 | |||
1104 | list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { | ||
1105 | unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); | ||
1106 | /* | ||
1107 | * An unregister could have removed the probe before | ||
1108 | * unmap. So check before we decrement the count. | ||
1109 | */ | ||
1110 | if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1) | ||
1111 | atomic_dec(&vma->vm_mm->uprobes_state.count); | ||
1112 | put_uprobe(uprobe); | ||
1113 | } | ||
1114 | mutex_unlock(uprobes_mmap_hash(inode)); | ||
1115 | } | 1072 | } |
1116 | 1073 | ||
1117 | /* Slot allocation for XOL */ | 1074 | /* Slot allocation for XOL */ |
@@ -1213,13 +1170,15 @@ void uprobe_clear_state(struct mm_struct *mm) | |||
1213 | kfree(area); | 1170 | kfree(area); |
1214 | } | 1171 | } |
1215 | 1172 | ||
1216 | /* | 1173 | void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) |
1217 | * uprobe_reset_state - Free the area allocated for slots. | ||
1218 | */ | ||
1219 | void uprobe_reset_state(struct mm_struct *mm) | ||
1220 | { | 1174 | { |
1221 | mm->uprobes_state.xol_area = NULL; | 1175 | newmm->uprobes_state.xol_area = NULL; |
1222 | atomic_set(&mm->uprobes_state.count, 0); | 1176 | |
1177 | if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { | ||
1178 | set_bit(MMF_HAS_UPROBES, &newmm->flags); | ||
1179 | /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ | ||
1180 | set_bit(MMF_RECALC_UPROBES, &newmm->flags); | ||
1181 | } | ||
1223 | } | 1182 | } |
1224 | 1183 | ||
1225 | /* | 1184 | /* |
@@ -1437,6 +1396,25 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs) | |||
1437 | return false; | 1396 | return false; |
1438 | } | 1397 | } |
1439 | 1398 | ||
1399 | static void mmf_recalc_uprobes(struct mm_struct *mm) | ||
1400 | { | ||
1401 | struct vm_area_struct *vma; | ||
1402 | |||
1403 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | ||
1404 | if (!valid_vma(vma, false)) | ||
1405 | continue; | ||
1406 | /* | ||
1407 | * This is not strictly accurate; we can race with | ||
1408 | * uprobe_unregister() and see the already removed | ||
1409 | * uprobe if delete_uprobe() was not yet called. | ||
1410 | */ | ||
1411 | if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) | ||
1412 | return; | ||
1413 | } | ||
1414 | |||
1415 | clear_bit(MMF_HAS_UPROBES, &mm->flags); | ||
1416 | } | ||
1417 | |||
1440 | static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) | 1418 | static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) |
1441 | { | 1419 | { |
1442 | struct mm_struct *mm = current->mm; | 1420 | struct mm_struct *mm = current->mm; |
@@ -1458,11 +1436,24 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) | |||
1458 | } else { | 1436 | } else { |
1459 | *is_swbp = -EFAULT; | 1437 | *is_swbp = -EFAULT; |
1460 | } | 1438 | } |
1439 | |||
1440 | if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) | ||
1441 | mmf_recalc_uprobes(mm); | ||
1461 | up_read(&mm->mmap_sem); | 1442 | up_read(&mm->mmap_sem); |
1462 | 1443 | ||
1463 | return uprobe; | 1444 | return uprobe; |
1464 | } | 1445 | } |
1465 | 1446 | ||
1447 | void __weak arch_uprobe_enable_step(struct arch_uprobe *arch) | ||
1448 | { | ||
1449 | user_enable_single_step(current); | ||
1450 | } | ||
1451 | |||
1452 | void __weak arch_uprobe_disable_step(struct arch_uprobe *arch) | ||
1453 | { | ||
1454 | user_disable_single_step(current); | ||
1455 | } | ||
1456 | |||
1466 | /* | 1457 | /* |
1467 | * Run handler and ask thread to singlestep. | 1458 | * Run handler and ask thread to singlestep. |
1468 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. | 1459 | * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. |
@@ -1509,7 +1500,7 @@ static void handle_swbp(struct pt_regs *regs) | |||
1509 | 1500 | ||
1510 | utask->state = UTASK_SSTEP; | 1501 | utask->state = UTASK_SSTEP; |
1511 | if (!pre_ssout(uprobe, regs, bp_vaddr)) { | 1502 | if (!pre_ssout(uprobe, regs, bp_vaddr)) { |
1512 | user_enable_single_step(current); | 1503 | arch_uprobe_enable_step(&uprobe->arch); |
1513 | return; | 1504 | return; |
1514 | } | 1505 | } |
1515 | 1506 | ||
@@ -1518,17 +1509,15 @@ cleanup_ret: | |||
1518 | utask->active_uprobe = NULL; | 1509 | utask->active_uprobe = NULL; |
1519 | utask->state = UTASK_RUNNING; | 1510 | utask->state = UTASK_RUNNING; |
1520 | } | 1511 | } |
1521 | if (uprobe) { | 1512 | if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) |
1522 | if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) | ||
1523 | 1513 | ||
1524 | /* | 1514 | /* |
1525 | * cannot singlestep; cannot skip instruction; | 1515 | * cannot singlestep; cannot skip instruction; |
1526 | * re-execute the instruction. | 1516 | * re-execute the instruction. |
1527 | */ | 1517 | */ |
1528 | instruction_pointer_set(regs, bp_vaddr); | 1518 | instruction_pointer_set(regs, bp_vaddr); |
1529 | 1519 | ||
1530 | put_uprobe(uprobe); | 1520 | put_uprobe(uprobe); |
1531 | } | ||
1532 | } | 1521 | } |
1533 | 1522 | ||
1534 | /* | 1523 | /* |
@@ -1547,10 +1536,10 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) | |||
1547 | else | 1536 | else |
1548 | WARN_ON_ONCE(1); | 1537 | WARN_ON_ONCE(1); |
1549 | 1538 | ||
1539 | arch_uprobe_disable_step(&uprobe->arch); | ||
1550 | put_uprobe(uprobe); | 1540 | put_uprobe(uprobe); |
1551 | utask->active_uprobe = NULL; | 1541 | utask->active_uprobe = NULL; |
1552 | utask->state = UTASK_RUNNING; | 1542 | utask->state = UTASK_RUNNING; |
1553 | user_disable_single_step(current); | ||
1554 | xol_free_insn_slot(current); | 1543 | xol_free_insn_slot(current); |
1555 | 1544 | ||
1556 | spin_lock_irq(&current->sighand->siglock); | 1545 | spin_lock_irq(&current->sighand->siglock); |
@@ -1589,8 +1578,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs) | |||
1589 | { | 1578 | { |
1590 | struct uprobe_task *utask; | 1579 | struct uprobe_task *utask; |
1591 | 1580 | ||
1592 | if (!current->mm || !atomic_read(¤t->mm->uprobes_state.count)) | 1581 | if (!current->mm || !test_bit(MMF_HAS_UPROBES, ¤t->mm->flags)) |
1593 | /* task is currently not uprobed */ | ||
1594 | return 0; | 1582 | return 0; |
1595 | 1583 | ||
1596 | utask = current->utask; | 1584 | utask = current->utask; |
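The uprobes changes above replace the per-mm atomic breakpoint counter (uprobes_state.count) with two mm flags: MMF_HAS_UPROBES is set once a breakpoint is installed, and MMF_RECALC_UPROBES marks that hint as possibly stale after munmap or fork so it can be recomputed lazily by mmf_recalc_uprobes() on the next trap. A hedged sketch of the resulting pattern, reusing only the flags and bit helpers visible in the hunks (the wrapper names are illustrative, not part of the patch):

	#include <linux/sched.h>	/* mm_struct, MMF_* flag bits */

	/* sketch: a lazily maintained "this mm may contain breakpoints" hint */
	static inline void demo_mark_has_uprobes(struct mm_struct *mm)
	{
		set_bit(MMF_HAS_UPROBES, &mm->flags);	/* never cleared eagerly */
	}

	static inline bool demo_may_have_uprobes(struct mm_struct *mm)
	{
		/* fast path in uprobe_pre_sstep_notifier(): a single test_bit() */
		return test_bit(MMF_HAS_UPROBES, &mm->flags);
	}

	static inline void demo_uprobes_maybe_gone(struct mm_struct *mm)
	{
		/* on munmap/fork: defer the rescan to the next breakpoint fault */
		set_bit(MMF_RECALC_UPROBES, &mm->flags);
	}

A false positive only costs one extra lookup in the uprobes rbtree; the old counter needed exact accounting on every mmap, munmap and fork.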
diff --git a/kernel/fork.c b/kernel/fork.c index 3bd2280d79f6..5a0e74d89a5a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -353,6 +353,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
353 | 353 | ||
354 | down_write(&oldmm->mmap_sem); | 354 | down_write(&oldmm->mmap_sem); |
355 | flush_cache_dup_mm(oldmm); | 355 | flush_cache_dup_mm(oldmm); |
356 | uprobe_dup_mmap(oldmm, mm); | ||
356 | /* | 357 | /* |
357 | * Not linked in yet - no deadlock potential: | 358 | * Not linked in yet - no deadlock potential: |
358 | */ | 359 | */ |
@@ -454,9 +455,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
454 | 455 | ||
455 | if (retval) | 456 | if (retval) |
456 | goto out; | 457 | goto out; |
457 | |||
458 | if (file && uprobe_mmap(tmp)) | ||
459 | goto out; | ||
460 | } | 458 | } |
461 | /* a new mm has just been created */ | 459 | /* a new mm has just been created */ |
462 | arch_dup_mmap(oldmm, mm); | 460 | arch_dup_mmap(oldmm, mm); |
@@ -839,8 +837,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk) | |||
839 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 837 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
840 | mm->pmd_huge_pte = NULL; | 838 | mm->pmd_huge_pte = NULL; |
841 | #endif | 839 | #endif |
842 | uprobe_reset_state(mm); | ||
843 | |||
844 | if (!mm_init(mm, tsk)) | 840 | if (!mm_init(mm, tsk)) |
845 | goto fail_nomem; | 841 | goto fail_nomem; |
846 | 842 | ||
@@ -1280,11 +1276,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1280 | #endif | 1276 | #endif |
1281 | #ifdef CONFIG_TRACE_IRQFLAGS | 1277 | #ifdef CONFIG_TRACE_IRQFLAGS |
1282 | p->irq_events = 0; | 1278 | p->irq_events = 0; |
1283 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1284 | p->hardirqs_enabled = 1; | ||
1285 | #else | ||
1286 | p->hardirqs_enabled = 0; | 1279 | p->hardirqs_enabled = 0; |
1287 | #endif | ||
1288 | p->hardirq_enable_ip = 0; | 1280 | p->hardirq_enable_ip = 0; |
1289 | p->hardirq_enable_event = 0; | 1281 | p->hardirq_enable_event = 0; |
1290 | p->hardirq_disable_ip = _THIS_IP_; | 1282 | p->hardirq_disable_ip = _THIS_IP_; |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index eebd6d5cfb44..57d86d07221e 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -671,6 +671,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | |||
671 | irq_set_chip(irq, chip); | 671 | irq_set_chip(irq, chip); |
672 | __irq_set_handler(irq, handle, 0, name); | 672 | __irq_set_handler(irq, handle, 0, name); |
673 | } | 673 | } |
674 | EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); | ||
674 | 675 | ||
675 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) | 676 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) |
676 | { | 677 | { |
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index b5fcd96c7102..988dc58e8847 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | #include <linux/interrupt.h> | 7 | #include <linux/interrupt.h> |
8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
9 | #include <linux/export.h> | ||
9 | 10 | ||
10 | #include "internals.h" | 11 | #include "internals.h" |
11 | 12 | ||
@@ -57,3 +58,4 @@ struct irq_chip dummy_irq_chip = { | |||
57 | .irq_mask = noop, | 58 | .irq_mask = noop, |
58 | .irq_unmask = noop, | 59 | .irq_unmask = noop, |
59 | }; | 60 | }; |
61 | EXPORT_SYMBOL_GPL(dummy_irq_chip); | ||
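Exporting irq_set_chip_and_handler_name() and dummy_irq_chip lets interrupt demultiplexers built as modules set a chip and flow handler in one call. A hedged sketch of such a user (the handler choice and the name are illustrative only):

	#include <linux/irq.h>
	#include <linux/module.h>

	static void demo_setup_virtual_irq(unsigned int irq)
	{
		/* dummy_irq_chip is a no-op chip, handy for purely virtual irqs
		 * that only need a flow handler */
		irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
					      handle_simple_irq, "demo");
	}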
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index c62b8546cc90..098f396aa409 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) | |||
561 | { | 561 | { |
562 | LIST_HEAD(free_list); | 562 | LIST_HEAD(free_list); |
563 | 563 | ||
564 | mutex_lock(&kprobe_mutex); | ||
564 | /* Lock modules while optimizing kprobes */ | 565 | /* Lock modules while optimizing kprobes */ |
565 | mutex_lock(&module_mutex); | 566 | mutex_lock(&module_mutex); |
566 | mutex_lock(&kprobe_mutex); | ||
567 | 567 | ||
568 | /* | 568 | /* |
569 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) | 569 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) |
@@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) | |||
586 | /* Step 4: Free cleaned kprobes after quiescence period */ | 586 | /* Step 4: Free cleaned kprobes after quiescence period */ |
587 | do_free_cleaned_kprobes(&free_list); | 587 | do_free_cleaned_kprobes(&free_list); |
588 | 588 | ||
589 | mutex_unlock(&kprobe_mutex); | ||
590 | mutex_unlock(&module_mutex); | 589 | mutex_unlock(&module_mutex); |
590 | mutex_unlock(&kprobe_mutex); | ||
591 | 591 | ||
592 | /* Step 5: Kick optimizer again if needed */ | 592 | /* Step 5: Kick optimizer again if needed */ |
593 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) | 593 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) |
@@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) | |||
759 | struct kprobe *ap; | 759 | struct kprobe *ap; |
760 | struct optimized_kprobe *op; | 760 | struct optimized_kprobe *op; |
761 | 761 | ||
762 | /* Impossible to optimize ftrace-based kprobe */ | ||
763 | if (kprobe_ftrace(p)) | ||
764 | return; | ||
765 | |||
766 | /* For preparing optimization, jump_label_text_reserved() is called */ | ||
767 | jump_label_lock(); | ||
768 | mutex_lock(&text_mutex); | ||
769 | |||
762 | ap = alloc_aggr_kprobe(p); | 770 | ap = alloc_aggr_kprobe(p); |
763 | if (!ap) | 771 | if (!ap) |
764 | return; | 772 | goto out; |
765 | 773 | ||
766 | op = container_of(ap, struct optimized_kprobe, kp); | 774 | op = container_of(ap, struct optimized_kprobe, kp); |
767 | if (!arch_prepared_optinsn(&op->optinsn)) { | 775 | if (!arch_prepared_optinsn(&op->optinsn)) { |
768 | /* If failed to setup optimizing, fallback to kprobe */ | 776 | /* If failed to setup optimizing, fallback to kprobe */ |
769 | arch_remove_optimized_kprobe(op); | 777 | arch_remove_optimized_kprobe(op); |
770 | kfree(op); | 778 | kfree(op); |
771 | return; | 779 | goto out; |
772 | } | 780 | } |
773 | 781 | ||
774 | init_aggr_kprobe(ap, p); | 782 | init_aggr_kprobe(ap, p); |
775 | optimize_kprobe(ap); | 783 | optimize_kprobe(ap); /* This just kicks optimizer thread */ |
784 | |||
785 | out: | ||
786 | mutex_unlock(&text_mutex); | ||
787 | jump_label_unlock(); | ||
776 | } | 788 | } |
777 | 789 | ||
778 | #ifdef CONFIG_SYSCTL | 790 | #ifdef CONFIG_SYSCTL |
@@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) | |||
907 | } | 919 | } |
908 | #endif /* CONFIG_OPTPROBES */ | 920 | #endif /* CONFIG_OPTPROBES */ |
909 | 921 | ||
922 | #ifdef KPROBES_CAN_USE_FTRACE | ||
923 | static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { | ||
924 | .func = kprobe_ftrace_handler, | ||
925 | .flags = FTRACE_OPS_FL_SAVE_REGS, | ||
926 | }; | ||
927 | static int kprobe_ftrace_enabled; | ||
928 | |||
929 | /* Must ensure p->addr is really on ftrace */ | ||
930 | static int __kprobes prepare_kprobe(struct kprobe *p) | ||
931 | { | ||
932 | if (!kprobe_ftrace(p)) | ||
933 | return arch_prepare_kprobe(p); | ||
934 | |||
935 | return arch_prepare_kprobe_ftrace(p); | ||
936 | } | ||
937 | |||
938 | /* Caller must lock kprobe_mutex */ | ||
939 | static void __kprobes arm_kprobe_ftrace(struct kprobe *p) | ||
940 | { | ||
941 | int ret; | ||
942 | |||
943 | ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, | ||
944 | (unsigned long)p->addr, 0, 0); | ||
945 | WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); | ||
946 | kprobe_ftrace_enabled++; | ||
947 | if (kprobe_ftrace_enabled == 1) { | ||
948 | ret = register_ftrace_function(&kprobe_ftrace_ops); | ||
949 | WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); | ||
950 | } | ||
951 | } | ||
952 | |||
953 | /* Caller must lock kprobe_mutex */ | ||
954 | static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) | ||
955 | { | ||
956 | int ret; | ||
957 | |||
958 | kprobe_ftrace_enabled--; | ||
959 | if (kprobe_ftrace_enabled == 0) { | ||
960 | ret = unregister_ftrace_function(&kprobe_ftrace_ops); | ||
961 | WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); | ||
962 | } | ||
963 | ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, | ||
964 | (unsigned long)p->addr, 1, 0); | ||
965 | WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); | ||
966 | } | ||
967 | #else /* !KPROBES_CAN_USE_FTRACE */ | ||
968 | #define prepare_kprobe(p) arch_prepare_kprobe(p) | ||
969 | #define arm_kprobe_ftrace(p) do {} while (0) | ||
970 | #define disarm_kprobe_ftrace(p) do {} while (0) | ||
971 | #endif | ||
972 | |||
910 | /* Arm a kprobe with text_mutex */ | 973 | /* Arm a kprobe with text_mutex */ |
911 | static void __kprobes arm_kprobe(struct kprobe *kp) | 974 | static void __kprobes arm_kprobe(struct kprobe *kp) |
912 | { | 975 | { |
976 | if (unlikely(kprobe_ftrace(kp))) { | ||
977 | arm_kprobe_ftrace(kp); | ||
978 | return; | ||
979 | } | ||
913 | /* | 980 | /* |
914 | * Here, since __arm_kprobe() doesn't use stop_machine(), | 981 | * Here, since __arm_kprobe() doesn't use stop_machine(), |
915 | * this doesn't cause deadlock on text_mutex. So, we don't | 982 | * this doesn't cause deadlock on text_mutex. So, we don't |
@@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp) | |||
921 | } | 988 | } |
922 | 989 | ||
923 | /* Disarm a kprobe with text_mutex */ | 990 | /* Disarm a kprobe with text_mutex */ |
924 | static void __kprobes disarm_kprobe(struct kprobe *kp) | 991 | static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt) |
925 | { | 992 | { |
993 | if (unlikely(kprobe_ftrace(kp))) { | ||
994 | disarm_kprobe_ftrace(kp); | ||
995 | return; | ||
996 | } | ||
926 | /* Ditto */ | 997 | /* Ditto */ |
927 | mutex_lock(&text_mutex); | 998 | mutex_lock(&text_mutex); |
928 | __disarm_kprobe(kp, true); | 999 | __disarm_kprobe(kp, reopt); |
929 | mutex_unlock(&text_mutex); | 1000 | mutex_unlock(&text_mutex); |
930 | } | 1001 | } |
931 | 1002 | ||
@@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) | |||
1144 | if (p->post_handler && !ap->post_handler) | 1215 | if (p->post_handler && !ap->post_handler) |
1145 | ap->post_handler = aggr_post_handler; | 1216 | ap->post_handler = aggr_post_handler; |
1146 | 1217 | ||
1147 | if (kprobe_disabled(ap) && !kprobe_disabled(p)) { | ||
1148 | ap->flags &= ~KPROBE_FLAG_DISABLED; | ||
1149 | if (!kprobes_all_disarmed) | ||
1150 | /* Arm the breakpoint again. */ | ||
1151 | __arm_kprobe(ap); | ||
1152 | } | ||
1153 | return 0; | 1218 | return 0; |
1154 | } | 1219 | } |
1155 | 1220 | ||
@@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, | |||
1189 | int ret = 0; | 1254 | int ret = 0; |
1190 | struct kprobe *ap = orig_p; | 1255 | struct kprobe *ap = orig_p; |
1191 | 1256 | ||
1257 | /* For preparing optimization, jump_label_text_reserved() is called */ | ||
1258 | jump_label_lock(); | ||
1259 | /* | ||
1260 | * Get online CPUs to avoid text_mutex deadlock with stop_machine, | ||
1261 | * which is invoked by unoptimize_kprobe() in add_new_kprobe() | ||
1262 | */ | ||
1263 | get_online_cpus(); | ||
1264 | mutex_lock(&text_mutex); | ||
1265 | |||
1192 | if (!kprobe_aggrprobe(orig_p)) { | 1266 | if (!kprobe_aggrprobe(orig_p)) { |
1193 | /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ | 1267 | /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ |
1194 | ap = alloc_aggr_kprobe(orig_p); | 1268 | ap = alloc_aggr_kprobe(orig_p); |
1195 | if (!ap) | 1269 | if (!ap) { |
1196 | return -ENOMEM; | 1270 | ret = -ENOMEM; |
1271 | goto out; | ||
1272 | } | ||
1197 | init_aggr_kprobe(ap, orig_p); | 1273 | init_aggr_kprobe(ap, orig_p); |
1198 | } else if (kprobe_unused(ap)) | 1274 | } else if (kprobe_unused(ap)) |
1199 | /* This probe is going to die. Rescue it */ | 1275 | /* This probe is going to die. Rescue it */ |
@@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, | |||
1213 | * free aggr_probe. It will be used next time, or | 1289 | * free aggr_probe. It will be used next time, or |
1214 | * freed by unregister_kprobe. | 1290 | * freed by unregister_kprobe. |
1215 | */ | 1291 | */ |
1216 | return ret; | 1292 | goto out; |
1217 | 1293 | ||
1218 | /* Prepare optimized instructions if possible. */ | 1294 | /* Prepare optimized instructions if possible. */ |
1219 | prepare_optimized_kprobe(ap); | 1295 | prepare_optimized_kprobe(ap); |
@@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, | |||
1228 | 1304 | ||
1229 | /* Copy ap's insn slot to p */ | 1305 | /* Copy ap's insn slot to p */ |
1230 | copy_kprobe(ap, p); | 1306 | copy_kprobe(ap, p); |
1231 | return add_new_kprobe(ap, p); | 1307 | ret = add_new_kprobe(ap, p); |
1308 | |||
1309 | out: | ||
1310 | mutex_unlock(&text_mutex); | ||
1311 | put_online_cpus(); | ||
1312 | jump_label_unlock(); | ||
1313 | |||
1314 | if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { | ||
1315 | ap->flags &= ~KPROBE_FLAG_DISABLED; | ||
1316 | if (!kprobes_all_disarmed) | ||
1317 | /* Arm the breakpoint again. */ | ||
1318 | arm_kprobe(ap); | ||
1319 | } | ||
1320 | return ret; | ||
1232 | } | 1321 | } |
1233 | 1322 | ||
1234 | static int __kprobes in_kprobes_functions(unsigned long addr) | 1323 | static int __kprobes in_kprobes_functions(unsigned long addr) |
@@ -1313,71 +1402,96 @@ static inline int check_kprobe_rereg(struct kprobe *p) | |||
1313 | return ret; | 1402 | return ret; |
1314 | } | 1403 | } |
1315 | 1404 | ||
1316 | int __kprobes register_kprobe(struct kprobe *p) | 1405 | static __kprobes int check_kprobe_address_safe(struct kprobe *p, |
1406 | struct module **probed_mod) | ||
1317 | { | 1407 | { |
1318 | int ret = 0; | 1408 | int ret = 0; |
1319 | struct kprobe *old_p; | 1409 | unsigned long ftrace_addr; |
1320 | struct module *probed_mod; | ||
1321 | kprobe_opcode_t *addr; | ||
1322 | |||
1323 | addr = kprobe_addr(p); | ||
1324 | if (IS_ERR(addr)) | ||
1325 | return PTR_ERR(addr); | ||
1326 | p->addr = addr; | ||
1327 | 1410 | ||
1328 | ret = check_kprobe_rereg(p); | 1411 | /* |
1329 | if (ret) | 1412 | * If the address is located on an ftrace nop, set the |
1330 | return ret; | 1413 | * breakpoint to the following instruction. |
1414 | */ | ||
1415 | ftrace_addr = ftrace_location((unsigned long)p->addr); | ||
1416 | if (ftrace_addr) { | ||
1417 | #ifdef KPROBES_CAN_USE_FTRACE | ||
1418 | /* Given address is not on the instruction boundary */ | ||
1419 | if ((unsigned long)p->addr != ftrace_addr) | ||
1420 | return -EILSEQ; | ||
1421 | p->flags |= KPROBE_FLAG_FTRACE; | ||
1422 | #else /* !KPROBES_CAN_USE_FTRACE */ | ||
1423 | return -EINVAL; | ||
1424 | #endif | ||
1425 | } | ||
1331 | 1426 | ||
1332 | jump_label_lock(); | 1427 | jump_label_lock(); |
1333 | preempt_disable(); | 1428 | preempt_disable(); |
1429 | |||
1430 | /* Ensure it is not in reserved area nor out of text */ | ||
1334 | if (!kernel_text_address((unsigned long) p->addr) || | 1431 | if (!kernel_text_address((unsigned long) p->addr) || |
1335 | in_kprobes_functions((unsigned long) p->addr) || | 1432 | in_kprobes_functions((unsigned long) p->addr) || |
1336 | ftrace_text_reserved(p->addr, p->addr) || | ||
1337 | jump_label_text_reserved(p->addr, p->addr)) { | 1433 | jump_label_text_reserved(p->addr, p->addr)) { |
1338 | ret = -EINVAL; | 1434 | ret = -EINVAL; |
1339 | goto cannot_probe; | 1435 | goto out; |
1340 | } | 1436 | } |
1341 | 1437 | ||
1342 | /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ | 1438 | /* Check if we are probing a module */ |
1343 | p->flags &= KPROBE_FLAG_DISABLED; | 1439 | *probed_mod = __module_text_address((unsigned long) p->addr); |
1344 | 1440 | if (*probed_mod) { | |
1345 | /* | ||
1346 | * Check if are we probing a module. | ||
1347 | */ | ||
1348 | probed_mod = __module_text_address((unsigned long) p->addr); | ||
1349 | if (probed_mod) { | ||
1350 | /* Return -ENOENT if fail. */ | ||
1351 | ret = -ENOENT; | ||
1352 | /* | 1441 | /* |
1353 | * We must hold a refcount of the probed module while updating | 1442 | * We must hold a refcount of the probed module while updating |
1354 | * its code to prohibit unexpected unloading. | 1443 | * its code to prohibit unexpected unloading. |
1355 | */ | 1444 | */ |
1356 | if (unlikely(!try_module_get(probed_mod))) | 1445 | if (unlikely(!try_module_get(*probed_mod))) { |
1357 | goto cannot_probe; | 1446 | ret = -ENOENT; |
1447 | goto out; | ||
1448 | } | ||
1358 | 1449 | ||
1359 | /* | 1450 | /* |
1360 | * If the module freed .init.text, we couldn't insert | 1451 | * If the module freed .init.text, we couldn't insert |
1361 | * kprobes in there. | 1452 | * kprobes in there. |
1362 | */ | 1453 | */ |
1363 | if (within_module_init((unsigned long)p->addr, probed_mod) && | 1454 | if (within_module_init((unsigned long)p->addr, *probed_mod) && |
1364 | probed_mod->state != MODULE_STATE_COMING) { | 1455 | (*probed_mod)->state != MODULE_STATE_COMING) { |
1365 | module_put(probed_mod); | 1456 | module_put(*probed_mod); |
1366 | goto cannot_probe; | 1457 | *probed_mod = NULL; |
1458 | ret = -ENOENT; | ||
1367 | } | 1459 | } |
1368 | /* ret will be updated by following code */ | ||
1369 | } | 1460 | } |
1461 | out: | ||
1370 | preempt_enable(); | 1462 | preempt_enable(); |
1371 | jump_label_unlock(); | 1463 | jump_label_unlock(); |
1372 | 1464 | ||
1465 | return ret; | ||
1466 | } | ||
1467 | |||
1468 | int __kprobes register_kprobe(struct kprobe *p) | ||
1469 | { | ||
1470 | int ret; | ||
1471 | struct kprobe *old_p; | ||
1472 | struct module *probed_mod; | ||
1473 | kprobe_opcode_t *addr; | ||
1474 | |||
1475 | /* Adjust probe address from symbol */ | ||
1476 | addr = kprobe_addr(p); | ||
1477 | if (IS_ERR(addr)) | ||
1478 | return PTR_ERR(addr); | ||
1479 | p->addr = addr; | ||
1480 | |||
1481 | ret = check_kprobe_rereg(p); | ||
1482 | if (ret) | ||
1483 | return ret; | ||
1484 | |||
1485 | /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ | ||
1486 | p->flags &= KPROBE_FLAG_DISABLED; | ||
1373 | p->nmissed = 0; | 1487 | p->nmissed = 0; |
1374 | INIT_LIST_HEAD(&p->list); | 1488 | INIT_LIST_HEAD(&p->list); |
1375 | mutex_lock(&kprobe_mutex); | ||
1376 | 1489 | ||
1377 | jump_label_lock(); /* needed to call jump_label_text_reserved() */ | 1490 | ret = check_kprobe_address_safe(p, &probed_mod); |
1491 | if (ret) | ||
1492 | return ret; | ||
1378 | 1493 | ||
1379 | get_online_cpus(); /* For avoiding text_mutex deadlock. */ | 1494 | mutex_lock(&kprobe_mutex); |
1380 | mutex_lock(&text_mutex); | ||
1381 | 1495 | ||
1382 | old_p = get_kprobe(p->addr); | 1496 | old_p = get_kprobe(p->addr); |
1383 | if (old_p) { | 1497 | if (old_p) { |
@@ -1386,7 +1500,9 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
1386 | goto out; | 1500 | goto out; |
1387 | } | 1501 | } |
1388 | 1502 | ||
1389 | ret = arch_prepare_kprobe(p); | 1503 | mutex_lock(&text_mutex); /* Avoiding text modification */ |
1504 | ret = prepare_kprobe(p); | ||
1505 | mutex_unlock(&text_mutex); | ||
1390 | if (ret) | 1506 | if (ret) |
1391 | goto out; | 1507 | goto out; |
1392 | 1508 | ||
@@ -1395,26 +1511,18 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
1395 | &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); | 1511 | &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); |
1396 | 1512 | ||
1397 | if (!kprobes_all_disarmed && !kprobe_disabled(p)) | 1513 | if (!kprobes_all_disarmed && !kprobe_disabled(p)) |
1398 | __arm_kprobe(p); | 1514 | arm_kprobe(p); |
1399 | 1515 | ||
1400 | /* Try to optimize kprobe */ | 1516 | /* Try to optimize kprobe */ |
1401 | try_to_optimize_kprobe(p); | 1517 | try_to_optimize_kprobe(p); |
1402 | 1518 | ||
1403 | out: | 1519 | out: |
1404 | mutex_unlock(&text_mutex); | ||
1405 | put_online_cpus(); | ||
1406 | jump_label_unlock(); | ||
1407 | mutex_unlock(&kprobe_mutex); | 1520 | mutex_unlock(&kprobe_mutex); |
1408 | 1521 | ||
1409 | if (probed_mod) | 1522 | if (probed_mod) |
1410 | module_put(probed_mod); | 1523 | module_put(probed_mod); |
1411 | 1524 | ||
1412 | return ret; | 1525 | return ret; |
1413 | |||
1414 | cannot_probe: | ||
1415 | preempt_enable(); | ||
1416 | jump_label_unlock(); | ||
1417 | return ret; | ||
1418 | } | 1526 | } |
1419 | EXPORT_SYMBOL_GPL(register_kprobe); | 1527 | EXPORT_SYMBOL_GPL(register_kprobe); |
1420 | 1528 | ||
@@ -1451,7 +1559,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) | |||
1451 | 1559 | ||
1452 | /* Try to disarm and disable this/parent probe */ | 1560 | /* Try to disarm and disable this/parent probe */ |
1453 | if (p == orig_p || aggr_kprobe_disabled(orig_p)) { | 1561 | if (p == orig_p || aggr_kprobe_disabled(orig_p)) { |
1454 | disarm_kprobe(orig_p); | 1562 | disarm_kprobe(orig_p, true); |
1455 | orig_p->flags |= KPROBE_FLAG_DISABLED; | 1563 | orig_p->flags |= KPROBE_FLAG_DISABLED; |
1456 | } | 1564 | } |
1457 | } | 1565 | } |
@@ -2049,10 +2157,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, | |||
2049 | 2157 | ||
2050 | if (!pp) | 2158 | if (!pp) |
2051 | pp = p; | 2159 | pp = p; |
2052 | seq_printf(pi, "%s%s%s\n", | 2160 | seq_printf(pi, "%s%s%s%s\n", |
2053 | (kprobe_gone(p) ? "[GONE]" : ""), | 2161 | (kprobe_gone(p) ? "[GONE]" : ""), |
2054 | ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), | 2162 | ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), |
2055 | (kprobe_optimized(pp) ? "[OPTIMIZED]" : "")); | 2163 | (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), |
2164 | (kprobe_ftrace(pp) ? "[FTRACE]" : "")); | ||
2056 | } | 2165 | } |
2057 | 2166 | ||
2058 | static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) | 2167 | static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) |
@@ -2131,14 +2240,12 @@ static void __kprobes arm_all_kprobes(void) | |||
2131 | goto already_enabled; | 2240 | goto already_enabled; |
2132 | 2241 | ||
2133 | /* Arming kprobes doesn't optimize kprobe itself */ | 2242 | /* Arming kprobes doesn't optimize kprobe itself */ |
2134 | mutex_lock(&text_mutex); | ||
2135 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 2243 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2136 | head = &kprobe_table[i]; | 2244 | head = &kprobe_table[i]; |
2137 | hlist_for_each_entry_rcu(p, node, head, hlist) | 2245 | hlist_for_each_entry_rcu(p, node, head, hlist) |
2138 | if (!kprobe_disabled(p)) | 2246 | if (!kprobe_disabled(p)) |
2139 | __arm_kprobe(p); | 2247 | arm_kprobe(p); |
2140 | } | 2248 | } |
2141 | mutex_unlock(&text_mutex); | ||
2142 | 2249 | ||
2143 | kprobes_all_disarmed = false; | 2250 | kprobes_all_disarmed = false; |
2144 | printk(KERN_INFO "Kprobes globally enabled\n"); | 2251 | printk(KERN_INFO "Kprobes globally enabled\n"); |
@@ -2166,15 +2273,13 @@ static void __kprobes disarm_all_kprobes(void) | |||
2166 | kprobes_all_disarmed = true; | 2273 | kprobes_all_disarmed = true; |
2167 | printk(KERN_INFO "Kprobes globally disabled\n"); | 2274 | printk(KERN_INFO "Kprobes globally disabled\n"); |
2168 | 2275 | ||
2169 | mutex_lock(&text_mutex); | ||
2170 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 2276 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2171 | head = &kprobe_table[i]; | 2277 | head = &kprobe_table[i]; |
2172 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 2278 | hlist_for_each_entry_rcu(p, node, head, hlist) { |
2173 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) | 2279 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) |
2174 | __disarm_kprobe(p, false); | 2280 | disarm_kprobe(p, false); |
2175 | } | 2281 | } |
2176 | } | 2282 | } |
2177 | mutex_unlock(&text_mutex); | ||
2178 | mutex_unlock(&kprobe_mutex); | 2283 | mutex_unlock(&kprobe_mutex); |
2179 | 2284 | ||
2180 | /* Wait for disarming all kprobes by optimizer */ | 2285 | /* Wait for disarming all kprobes by optimizer */ |
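With KPROBES_CAN_USE_FTRACE, a kprobe whose address is exactly an ftrace-patched function entry is flagged KPROBE_FLAG_FTRACE and armed through ftrace_set_filter_ip() instead of a software breakpoint; registration from the user's side is unchanged. A hedged example module (the target symbol is only an example and is assumed to be an ftrace site):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static int demo_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("kprobe hit at %p\n", p->addr);
		return 0;		/* let the probed function run normally */
	}

	static struct kprobe demo_kp = {
		.symbol_name	= "do_fork",	/* example target */
		.pre_handler	= demo_pre,
	};

	static int __init demo_init(void)
	{
		/* register_kprobe() detects the ftrace location itself; the probe
		 * then shows up tagged [FTRACE] in debugfs kprobes/list */
		return register_kprobe(&demo_kp);
	}

	static void __exit demo_exit(void)
	{
		unregister_kprobe(&demo_kp);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");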
diff --git a/kernel/kthread.c b/kernel/kthread.c index b579af57ea10..146a6fa96825 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -37,11 +37,20 @@ struct kthread_create_info | |||
37 | }; | 37 | }; |
38 | 38 | ||
39 | struct kthread { | 39 | struct kthread { |
40 | int should_stop; | 40 | unsigned long flags; |
41 | unsigned int cpu; | ||
41 | void *data; | 42 | void *data; |
43 | struct completion parked; | ||
42 | struct completion exited; | 44 | struct completion exited; |
43 | }; | 45 | }; |
44 | 46 | ||
47 | enum KTHREAD_BITS { | ||
48 | KTHREAD_IS_PER_CPU = 0, | ||
49 | KTHREAD_SHOULD_STOP, | ||
50 | KTHREAD_SHOULD_PARK, | ||
51 | KTHREAD_IS_PARKED, | ||
52 | }; | ||
53 | |||
45 | #define to_kthread(tsk) \ | 54 | #define to_kthread(tsk) \ |
46 | container_of((tsk)->vfork_done, struct kthread, exited) | 55 | container_of((tsk)->vfork_done, struct kthread, exited) |
47 | 56 | ||
@@ -52,13 +61,29 @@ struct kthread { | |||
52 | * and this will return true. You should then return, and your return | 61 | * and this will return true. You should then return, and your return |
53 | * value will be passed through to kthread_stop(). | 62 | * value will be passed through to kthread_stop(). |
54 | */ | 63 | */ |
55 | int kthread_should_stop(void) | 64 | bool kthread_should_stop(void) |
56 | { | 65 | { |
57 | return to_kthread(current)->should_stop; | 66 | return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); |
58 | } | 67 | } |
59 | EXPORT_SYMBOL(kthread_should_stop); | 68 | EXPORT_SYMBOL(kthread_should_stop); |
60 | 69 | ||
61 | /** | 70 | /** |
71 | * kthread_should_park - should this kthread park now? | ||
72 | * | ||
73 | * When someone calls kthread_park() on your kthread, it will be woken | ||
74 | * and this will return true. You should then do the necessary | ||
75 | * cleanup and call kthread_parkme(). | ||
76 | * | ||
77 | * Similar to kthread_should_stop(), but this keeps the thread alive | ||
78 | * and in a park position. kthread_unpark() "restarts" the thread and | ||
79 | * calls the thread function again. | ||
80 | */ | ||
81 | bool kthread_should_park(void) | ||
82 | { | ||
83 | return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); | ||
84 | } | ||
85 | |||
86 | /** | ||
62 | * kthread_freezable_should_stop - should this freezable kthread return now? | 87 | * kthread_freezable_should_stop - should this freezable kthread return now? |
63 | * @was_frozen: optional out parameter, indicates whether %current was frozen | 88 | * @was_frozen: optional out parameter, indicates whether %current was frozen |
64 | * | 89 | * |
@@ -96,6 +121,24 @@ void *kthread_data(struct task_struct *task) | |||
96 | return to_kthread(task)->data; | 121 | return to_kthread(task)->data; |
97 | } | 122 | } |
98 | 123 | ||
124 | static void __kthread_parkme(struct kthread *self) | ||
125 | { | ||
126 | __set_current_state(TASK_INTERRUPTIBLE); | ||
127 | while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { | ||
128 | if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) | ||
129 | complete(&self->parked); | ||
130 | schedule(); | ||
131 | __set_current_state(TASK_INTERRUPTIBLE); | ||
132 | } | ||
133 | clear_bit(KTHREAD_IS_PARKED, &self->flags); | ||
134 | __set_current_state(TASK_RUNNING); | ||
135 | } | ||
136 | |||
137 | void kthread_parkme(void) | ||
138 | { | ||
139 | __kthread_parkme(to_kthread(current)); | ||
140 | } | ||
141 | |||
99 | static int kthread(void *_create) | 142 | static int kthread(void *_create) |
100 | { | 143 | { |
101 | /* Copy data: it's on kthread's stack */ | 144 | /* Copy data: it's on kthread's stack */ |
@@ -105,9 +148,10 @@ static int kthread(void *_create) | |||
105 | struct kthread self; | 148 | struct kthread self; |
106 | int ret; | 149 | int ret; |
107 | 150 | ||
108 | self.should_stop = 0; | 151 | self.flags = 0; |
109 | self.data = data; | 152 | self.data = data; |
110 | init_completion(&self.exited); | 153 | init_completion(&self.exited); |
154 | init_completion(&self.parked); | ||
111 | current->vfork_done = &self.exited; | 155 | current->vfork_done = &self.exited; |
112 | 156 | ||
113 | /* OK, tell user we're spawned, wait for stop or wakeup */ | 157 | /* OK, tell user we're spawned, wait for stop or wakeup */ |
@@ -117,9 +161,11 @@ static int kthread(void *_create) | |||
117 | schedule(); | 161 | schedule(); |
118 | 162 | ||
119 | ret = -EINTR; | 163 | ret = -EINTR; |
120 | if (!self.should_stop) | ||
121 | ret = threadfn(data); | ||
122 | 164 | ||
165 | if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) { | ||
166 | __kthread_parkme(&self); | ||
167 | ret = threadfn(data); | ||
168 | } | ||
123 | /* we can't just return, we must preserve "self" on stack */ | 169 | /* we can't just return, we must preserve "self" on stack */ |
124 | do_exit(ret); | 170 | do_exit(ret); |
125 | } | 171 | } |
@@ -172,8 +218,7 @@ static void create_kthread(struct kthread_create_info *create) | |||
172 | * Returns a task_struct or ERR_PTR(-ENOMEM). | 218 | * Returns a task_struct or ERR_PTR(-ENOMEM). |
173 | */ | 219 | */ |
174 | struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | 220 | struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), |
175 | void *data, | 221 | void *data, int node, |
176 | int node, | ||
177 | const char namefmt[], | 222 | const char namefmt[], |
178 | ...) | 223 | ...) |
179 | { | 224 | { |
@@ -210,6 +255,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | |||
210 | } | 255 | } |
211 | EXPORT_SYMBOL(kthread_create_on_node); | 256 | EXPORT_SYMBOL(kthread_create_on_node); |
212 | 257 | ||
258 | static void __kthread_bind(struct task_struct *p, unsigned int cpu) | ||
259 | { | ||
260 | /* It's safe because the task is inactive. */ | ||
261 | do_set_cpus_allowed(p, cpumask_of(cpu)); | ||
262 | p->flags |= PF_THREAD_BOUND; | ||
263 | } | ||
264 | |||
213 | /** | 265 | /** |
214 | * kthread_bind - bind a just-created kthread to a cpu. | 266 | * kthread_bind - bind a just-created kthread to a cpu. |
215 | * @p: thread created by kthread_create(). | 267 | * @p: thread created by kthread_create(). |
@@ -226,14 +278,112 @@ void kthread_bind(struct task_struct *p, unsigned int cpu) | |||
226 | WARN_ON(1); | 278 | WARN_ON(1); |
227 | return; | 279 | return; |
228 | } | 280 | } |
229 | 281 | __kthread_bind(p, cpu); | |
230 | /* It's safe because the task is inactive. */ | ||
231 | do_set_cpus_allowed(p, cpumask_of(cpu)); | ||
232 | p->flags |= PF_THREAD_BOUND; | ||
233 | } | 282 | } |
234 | EXPORT_SYMBOL(kthread_bind); | 283 | EXPORT_SYMBOL(kthread_bind); |
235 | 284 | ||
236 | /** | 285 | /** |
286 | * kthread_create_on_cpu - Create a cpu bound kthread | ||
287 | * @threadfn: the function to run until signal_pending(current). | ||
288 | * @data: data ptr for @threadfn. | ||
289 | * @cpu: The cpu on which the thread should be bound, | ||
290 | * @namefmt: printf-style name for the thread. Format is restricted | ||
291 | * to "name.*%u". Code fills in cpu number. | ||
292 | * | ||
293 | * Description: This helper function creates and names a kernel thread. | ||
294 | * The thread will be woken and put into park mode. | ||
295 | */ | ||
296 | struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), | ||
297 | void *data, unsigned int cpu, | ||
298 | const char *namefmt) | ||
299 | { | ||
300 | struct task_struct *p; | ||
301 | |||
302 | p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, | ||
303 | cpu); | ||
304 | if (IS_ERR(p)) | ||
305 | return p; | ||
306 | set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); | ||
307 | to_kthread(p)->cpu = cpu; | ||
308 | /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */ | ||
309 | kthread_park(p); | ||
310 | return p; | ||
311 | } | ||
312 | |||
313 | static struct kthread *task_get_live_kthread(struct task_struct *k) | ||
314 | { | ||
315 | struct kthread *kthread; | ||
316 | |||
317 | get_task_struct(k); | ||
318 | kthread = to_kthread(k); | ||
319 | /* It might have exited */ | ||
320 | barrier(); | ||
321 | if (k->vfork_done != NULL) | ||
322 | return kthread; | ||
323 | return NULL; | ||
324 | } | ||
325 | |||
326 | /** | ||
327 | * kthread_unpark - unpark a thread created by kthread_create(). | ||
328 | * @k: thread created by kthread_create(). | ||
329 | * | ||
330 | * Sets kthread_should_park() for @k to return false, wakes it, and | ||
331 | * waits for it to return. If the thread is marked percpu then it is | ||
332 | * bound to the cpu again. | ||
333 | */ | ||
334 | void kthread_unpark(struct task_struct *k) | ||
335 | { | ||
336 | struct kthread *kthread = task_get_live_kthread(k); | ||
337 | |||
338 | if (kthread) { | ||
339 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | ||
340 | /* | ||
341 | * We clear the IS_PARKED bit here as we don't wait | ||
342 | * until the task has left the park code. So if we'd | ||
343 | * park before that happens we'd see the IS_PARKED bit | ||
344 | * which might be about to be cleared. | ||
345 | */ | ||
346 | if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | ||
347 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | ||
348 | __kthread_bind(k, kthread->cpu); | ||
349 | wake_up_process(k); | ||
350 | } | ||
351 | } | ||
352 | put_task_struct(k); | ||
353 | } | ||
354 | |||
355 | /** | ||
356 | * kthread_park - park a thread created by kthread_create(). | ||
357 | * @k: thread created by kthread_create(). | ||
358 | * | ||
359 | * Sets kthread_should_park() for @k to return true, wakes it, and | ||
360 | * waits for it to return. This can also be called after kthread_create() | ||
361 | * instead of calling wake_up_process(): the thread will park without | ||
362 | * calling threadfn(). | ||
363 | * | ||
364 | * Returns 0 if the thread is parked, -ENOSYS if the thread exited. | ||
365 | * If called by the kthread itself just the park bit is set. | ||
366 | */ | ||
367 | int kthread_park(struct task_struct *k) | ||
368 | { | ||
369 | struct kthread *kthread = task_get_live_kthread(k); | ||
370 | int ret = -ENOSYS; | ||
371 | |||
372 | if (kthread) { | ||
373 | if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | ||
374 | set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | ||
375 | if (k != current) { | ||
376 | wake_up_process(k); | ||
377 | wait_for_completion(&kthread->parked); | ||
378 | } | ||
379 | } | ||
380 | ret = 0; | ||
381 | } | ||
382 | put_task_struct(k); | ||
383 | return ret; | ||
384 | } | ||
385 | |||
386 | /** | ||
237 | * kthread_stop - stop a thread created by kthread_create(). | 387 | * kthread_stop - stop a thread created by kthread_create(). |
238 | * @k: thread created by kthread_create(). | 388 | * @k: thread created by kthread_create(). |
239 | * | 389 | * |
@@ -250,16 +400,13 @@ EXPORT_SYMBOL(kthread_bind); | |||
250 | */ | 400 | */ |
251 | int kthread_stop(struct task_struct *k) | 401 | int kthread_stop(struct task_struct *k) |
252 | { | 402 | { |
253 | struct kthread *kthread; | 403 | struct kthread *kthread = task_get_live_kthread(k); |
254 | int ret; | 404 | int ret; |
255 | 405 | ||
256 | trace_sched_kthread_stop(k); | 406 | trace_sched_kthread_stop(k); |
257 | get_task_struct(k); | 407 | if (kthread) { |
258 | 408 | set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); | |
259 | kthread = to_kthread(k); | 409 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); |
260 | barrier(); /* it might have exited */ | ||
261 | if (k->vfork_done != NULL) { | ||
262 | kthread->should_stop = 1; | ||
263 | wake_up_process(k); | 410 | wake_up_process(k); |
264 | wait_for_completion(&kthread->exited); | 411 | wait_for_completion(&kthread->exited); |
265 | } | 412 | } |
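The park/unpark primitives above give per-cpu kernel threads a way to quiesce across CPU hotplug without being destroyed and recreated. A hedged usage sketch (the thread body and names are illustrative):

	#include <linux/kthread.h>
	#include <linux/delay.h>

	static int demo_percpu_thread(void *data)
	{
		while (!kthread_should_stop()) {
			if (kthread_should_park())
				kthread_parkme();	/* sleeps here until unparked */

			/* ... one unit of per-cpu work ... */
			msleep_interruptible(1000);
		}
		return 0;
	}

	/* Creation side, e.g. from cpu-hotplug setup code:
	 *
	 *	t = kthread_create_on_cpu(demo_percpu_thread, NULL, cpu, "demo/%u");
	 *	                          // returned thread is already parked
	 *	kthread_unpark(t);        // rebinds to 'cpu' and lets it run
	 *	...
	 *	kthread_park(t);          // quiesce, e.g. before the cpu goes down
	 *	kthread_stop(t);          // final teardown
	 */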
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index ea9ee4518c35..7981e5b2350d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -2998,6 +2998,42 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); | |||
2998 | 2998 | ||
2999 | struct lock_class_key __lockdep_no_validate__; | 2999 | struct lock_class_key __lockdep_no_validate__; |
3000 | 3000 | ||
3001 | static int | ||
3002 | print_lock_nested_lock_not_held(struct task_struct *curr, | ||
3003 | struct held_lock *hlock, | ||
3004 | unsigned long ip) | ||
3005 | { | ||
3006 | if (!debug_locks_off()) | ||
3007 | return 0; | ||
3008 | if (debug_locks_silent) | ||
3009 | return 0; | ||
3010 | |||
3011 | printk("\n"); | ||
3012 | printk("==================================\n"); | ||
3013 | printk("[ BUG: Nested lock was not taken ]\n"); | ||
3014 | print_kernel_ident(); | ||
3015 | printk("----------------------------------\n"); | ||
3016 | |||
3017 | printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); | ||
3018 | print_lock(hlock); | ||
3019 | |||
3020 | printk("\nbut this task is not holding:\n"); | ||
3021 | printk("%s\n", hlock->nest_lock->name); | ||
3022 | |||
3023 | printk("\nstack backtrace:\n"); | ||
3024 | dump_stack(); | ||
3025 | |||
3026 | printk("\nother info that might help us debug this:\n"); | ||
3027 | lockdep_print_held_locks(curr); | ||
3028 | |||
3029 | printk("\nstack backtrace:\n"); | ||
3030 | dump_stack(); | ||
3031 | |||
3032 | return 0; | ||
3033 | } | ||
3034 | |||
3035 | static int __lock_is_held(struct lockdep_map *lock); | ||
3036 | |||
3001 | /* | 3037 | /* |
3002 | * This gets called for every mutex_lock*()/spin_lock*() operation. | 3038 | * This gets called for every mutex_lock*()/spin_lock*() operation. |
3003 | * We maintain the dependency maps and validate the locking attempt: | 3039 | * We maintain the dependency maps and validate the locking attempt: |
@@ -3139,6 +3175,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3139 | } | 3175 | } |
3140 | chain_key = iterate_chain_key(chain_key, id); | 3176 | chain_key = iterate_chain_key(chain_key, id); |
3141 | 3177 | ||
3178 | if (nest_lock && !__lock_is_held(nest_lock)) | ||
3179 | return print_lock_nested_lock_not_held(curr, hlock, ip); | ||
3180 | |||
3142 | if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) | 3181 | if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) |
3143 | return 0; | 3182 | return 0; |
3144 | 3183 | ||
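The new check fires when a lock is acquired with a nest_lock annotation while the claimed outer lock is not actually held. A hedged sketch of the pattern being validated, modeled on mm_take_all_locks()-style code (the struct and field names are illustrative):

	#include <linux/mutex.h>
	#include <linux/list.h>

	struct demo_child  { struct mutex lock; struct list_head node; };
	struct demo_parent { struct mutex group_lock; struct list_head children; };

	static void demo_lock_all_children(struct demo_parent *p)
	{
		struct demo_child *c;

		mutex_lock(&p->group_lock);
		list_for_each_entry(c, &p->children, node)
			/* "many of these may be held; group_lock serializes them" */
			mutex_lock_nest_lock(&c->lock, &p->group_lock);
	}

	/* If group_lock were not held at the annotation, lockdep now prints
	 * "BUG: Nested lock was not taken" instead of trusting the claim. */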
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index b3c7fd554250..6144bab8fd8e 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -232,15 +232,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write, | |||
232 | */ | 232 | */ |
233 | 233 | ||
234 | tmp.data = &current->nsproxy->pid_ns->last_pid; | 234 | tmp.data = &current->nsproxy->pid_ns->last_pid; |
235 | return proc_dointvec(&tmp, write, buffer, lenp, ppos); | 235 | return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); |
236 | } | 236 | } |
237 | 237 | ||
238 | extern int pid_max; | ||
239 | static int zero = 0; | ||
238 | static struct ctl_table pid_ns_ctl_table[] = { | 240 | static struct ctl_table pid_ns_ctl_table[] = { |
239 | { | 241 | { |
240 | .procname = "ns_last_pid", | 242 | .procname = "ns_last_pid", |
241 | .maxlen = sizeof(int), | 243 | .maxlen = sizeof(int), |
242 | .mode = 0666, /* permissions are checked in the handler */ | 244 | .mode = 0666, /* permissions are checked in the handler */ |
243 | .proc_handler = pid_ns_ctl_handler, | 245 | .proc_handler = pid_ns_ctl_handler, |
246 | .extra1 = &zero, | ||
247 | .extra2 = &pid_max, | ||
244 | }, | 248 | }, |
245 | { } | 249 | { } |
246 | }; | 250 | }; |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 4e6a61b15e86..29ca1c6da594 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
46 | #include <linux/export.h> | 46 | #include <linux/export.h> |
47 | #include <linux/hardirq.h> | 47 | #include <linux/hardirq.h> |
48 | #include <linux/delay.h> | ||
48 | 49 | ||
49 | #define CREATE_TRACE_POINTS | 50 | #define CREATE_TRACE_POINTS |
50 | #include <trace/events/rcu.h> | 51 | #include <trace/events/rcu.h> |
@@ -81,6 +82,9 @@ void __rcu_read_unlock(void) | |||
81 | } else { | 82 | } else { |
82 | barrier(); /* critical section before exit code. */ | 83 | barrier(); /* critical section before exit code. */ |
83 | t->rcu_read_lock_nesting = INT_MIN; | 84 | t->rcu_read_lock_nesting = INT_MIN; |
85 | #ifdef CONFIG_PROVE_RCU_DELAY | ||
86 | udelay(10); /* Make preemption more probable. */ | ||
87 | #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ | ||
84 | barrier(); /* assign before ->rcu_read_unlock_special load */ | 88 | barrier(); /* assign before ->rcu_read_unlock_special load */ |
85 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) | 89 | if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) |
86 | rcu_read_unlock_special(t); | 90 | rcu_read_unlock_special(t); |
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index 547b1fe5b052..e4c6a598d6f7 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
@@ -56,25 +56,28 @@ static void __call_rcu(struct rcu_head *head, | |||
56 | static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; | 56 | static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; |
57 | 57 | ||
58 | /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ | 58 | /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ |
59 | static void rcu_idle_enter_common(long long oldval) | 59 | static void rcu_idle_enter_common(long long newval) |
60 | { | 60 | { |
61 | if (rcu_dynticks_nesting) { | 61 | if (newval) { |
62 | RCU_TRACE(trace_rcu_dyntick("--=", | 62 | RCU_TRACE(trace_rcu_dyntick("--=", |
63 | oldval, rcu_dynticks_nesting)); | 63 | rcu_dynticks_nesting, newval)); |
64 | rcu_dynticks_nesting = newval; | ||
64 | return; | 65 | return; |
65 | } | 66 | } |
66 | RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting)); | 67 | RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval)); |
67 | if (!is_idle_task(current)) { | 68 | if (!is_idle_task(current)) { |
68 | struct task_struct *idle = idle_task(smp_processor_id()); | 69 | struct task_struct *idle = idle_task(smp_processor_id()); |
69 | 70 | ||
70 | RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task", | 71 | RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task", |
71 | oldval, rcu_dynticks_nesting)); | 72 | rcu_dynticks_nesting, newval)); |
72 | ftrace_dump(DUMP_ALL); | 73 | ftrace_dump(DUMP_ALL); |
73 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", | 74 | WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", |
74 | current->pid, current->comm, | 75 | current->pid, current->comm, |
75 | idle->pid, idle->comm); /* must be idle task! */ | 76 | idle->pid, idle->comm); /* must be idle task! */ |
76 | } | 77 | } |
77 | rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ | 78 | rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ |
79 | barrier(); | ||
80 | rcu_dynticks_nesting = newval; | ||
78 | } | 81 | } |
79 | 82 | ||
80 | /* | 83 | /* |
@@ -84,17 +87,16 @@ static void rcu_idle_enter_common(long long oldval) | |||
84 | void rcu_idle_enter(void) | 87 | void rcu_idle_enter(void) |
85 | { | 88 | { |
86 | unsigned long flags; | 89 | unsigned long flags; |
87 | long long oldval; | 90 | long long newval; |
88 | 91 | ||
89 | local_irq_save(flags); | 92 | local_irq_save(flags); |
90 | oldval = rcu_dynticks_nesting; | ||
91 | WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); | 93 | WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); |
92 | if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == | 94 | if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == |
93 | DYNTICK_TASK_NEST_VALUE) | 95 | DYNTICK_TASK_NEST_VALUE) |
94 | rcu_dynticks_nesting = 0; | 96 | newval = 0; |
95 | else | 97 | else |
96 | rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; | 98 | newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE; |
97 | rcu_idle_enter_common(oldval); | 99 | rcu_idle_enter_common(newval); |
98 | local_irq_restore(flags); | 100 | local_irq_restore(flags); |
99 | } | 101 | } |
100 | EXPORT_SYMBOL_GPL(rcu_idle_enter); | 102 | EXPORT_SYMBOL_GPL(rcu_idle_enter); |
@@ -105,15 +107,15 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter); | |||
105 | void rcu_irq_exit(void) | 107 | void rcu_irq_exit(void) |
106 | { | 108 | { |
107 | unsigned long flags; | 109 | unsigned long flags; |
108 | long long oldval; | 110 | long long newval; |
109 | 111 | ||
110 | local_irq_save(flags); | 112 | local_irq_save(flags); |
111 | oldval = rcu_dynticks_nesting; | 113 | newval = rcu_dynticks_nesting - 1; |
112 | rcu_dynticks_nesting--; | 114 | WARN_ON_ONCE(newval < 0); |
113 | WARN_ON_ONCE(rcu_dynticks_nesting < 0); | 115 | rcu_idle_enter_common(newval); |
114 | rcu_idle_enter_common(oldval); | ||
115 | local_irq_restore(flags); | 116 | local_irq_restore(flags); |
116 | } | 117 | } |
118 | EXPORT_SYMBOL_GPL(rcu_irq_exit); | ||
117 | 119 | ||
118 | /* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */ | 120 | /* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */ |
119 | static void rcu_idle_exit_common(long long oldval) | 121 | static void rcu_idle_exit_common(long long oldval) |
@@ -171,6 +173,7 @@ void rcu_irq_enter(void) | |||
171 | rcu_idle_exit_common(oldval); | 173 | rcu_idle_exit_common(oldval); |
172 | local_irq_restore(flags); | 174 | local_irq_restore(flags); |
173 | } | 175 | } |
176 | EXPORT_SYMBOL_GPL(rcu_irq_enter); | ||
174 | 177 | ||
175 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 178 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
176 | 179 | ||
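rcu_irq_enter() and rcu_irq_exit() are now exported, so code outside the core kernel can bracket interrupt-like entry into and exit from dyntick-idle. A hedged sketch of the pairing (the handler name is illustrative):

	#include <linux/rcupdate.h>

	static void demo_handle_irq_like_event(void)
	{
		rcu_irq_enter();	/* tell RCU this CPU is no longer idle */

		/* ... run the handler; RCU read-side sections are legal here ... */

		rcu_irq_exit();		/* may re-enter dyntick-idle state */
	}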
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index 918fd1e8509c..3d0190282204 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h | |||
@@ -278,7 +278,7 @@ static int rcu_boost(void) | |||
278 | rcu_preempt_ctrlblk.exp_tasks == NULL) | 278 | rcu_preempt_ctrlblk.exp_tasks == NULL) |
279 | return 0; /* Nothing to boost. */ | 279 | return 0; /* Nothing to boost. */ |
280 | 280 | ||
281 | raw_local_irq_save(flags); | 281 | local_irq_save(flags); |
282 | 282 | ||
283 | /* | 283 | /* |
284 | * Recheck with irqs disabled: all tasks in need of boosting | 284 | * Recheck with irqs disabled: all tasks in need of boosting |
@@ -287,7 +287,7 @@ static int rcu_boost(void) | |||
287 | */ | 287 | */ |
288 | if (rcu_preempt_ctrlblk.boost_tasks == NULL && | 288 | if (rcu_preempt_ctrlblk.boost_tasks == NULL && |
289 | rcu_preempt_ctrlblk.exp_tasks == NULL) { | 289 | rcu_preempt_ctrlblk.exp_tasks == NULL) { |
290 | raw_local_irq_restore(flags); | 290 | local_irq_restore(flags); |
291 | return 0; | 291 | return 0; |
292 | } | 292 | } |
293 | 293 | ||
@@ -317,7 +317,7 @@ static int rcu_boost(void) | |||
317 | t = container_of(tb, struct task_struct, rcu_node_entry); | 317 | t = container_of(tb, struct task_struct, rcu_node_entry); |
318 | rt_mutex_init_proxy_locked(&mtx, t); | 318 | rt_mutex_init_proxy_locked(&mtx, t); |
319 | t->rcu_boost_mutex = &mtx; | 319 | t->rcu_boost_mutex = &mtx; |
320 | raw_local_irq_restore(flags); | 320 | local_irq_restore(flags); |
321 | rt_mutex_lock(&mtx); | 321 | rt_mutex_lock(&mtx); |
322 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ | 322 | rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ |
323 | 323 | ||
@@ -991,9 +991,9 @@ static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n) | |||
991 | { | 991 | { |
992 | unsigned long flags; | 992 | unsigned long flags; |
993 | 993 | ||
994 | raw_local_irq_save(flags); | 994 | local_irq_save(flags); |
995 | rcp->qlen -= n; | 995 | rcp->qlen -= n; |
996 | raw_local_irq_restore(flags); | 996 | local_irq_restore(flags); |
997 | } | 997 | } |
998 | 998 | ||
999 | /* | 999 | /* |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 25b15033c61f..aaa7b9f3532a 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -53,10 +53,11 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@fre | |||
53 | 53 | ||
54 | static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ | 54 | static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ |
55 | static int nfakewriters = 4; /* # fake writer threads */ | 55 | static int nfakewriters = 4; /* # fake writer threads */ |
56 | static int stat_interval; /* Interval between stats, in seconds. */ | 56 | static int stat_interval = 60; /* Interval between stats, in seconds. */ |
57 | /* Defaults to "only at end of test". */ | 57 | /* Zero means "only at end of test". */ |
58 | static bool verbose; /* Print more debug info. */ | 58 | static bool verbose; /* Print more debug info. */ |
59 | static bool test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ | 59 | static bool test_no_idle_hz = true; |
60 | /* Test RCU support for tickless idle CPUs. */ | ||
60 | static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/ | 61 | static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/ |
61 | static int stutter = 5; /* Start/stop testing interval (in sec) */ | 62 | static int stutter = 5; /* Start/stop testing interval (in sec) */ |
62 | static int irqreader = 1; /* RCU readers from irq (timers). */ | 63 | static int irqreader = 1; /* RCU readers from irq (timers). */ |
@@ -119,11 +120,11 @@ MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); | |||
119 | 120 | ||
120 | #define TORTURE_FLAG "-torture:" | 121 | #define TORTURE_FLAG "-torture:" |
121 | #define PRINTK_STRING(s) \ | 122 | #define PRINTK_STRING(s) \ |
122 | do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0) | 123 | do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) |
123 | #define VERBOSE_PRINTK_STRING(s) \ | 124 | #define VERBOSE_PRINTK_STRING(s) \ |
124 | do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0) | 125 | do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) |
125 | #define VERBOSE_PRINTK_ERRSTRING(s) \ | 126 | #define VERBOSE_PRINTK_ERRSTRING(s) \ |
126 | do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) | 127 | do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) |
127 | 128 | ||
128 | static char printk_buf[4096]; | 129 | static char printk_buf[4096]; |
129 | 130 | ||
@@ -176,8 +177,14 @@ static long n_rcu_torture_boosts; | |||
176 | static long n_rcu_torture_timers; | 177 | static long n_rcu_torture_timers; |
177 | static long n_offline_attempts; | 178 | static long n_offline_attempts; |
178 | static long n_offline_successes; | 179 | static long n_offline_successes; |
180 | static unsigned long sum_offline; | ||
181 | static int min_offline = -1; | ||
182 | static int max_offline; | ||
179 | static long n_online_attempts; | 183 | static long n_online_attempts; |
180 | static long n_online_successes; | 184 | static long n_online_successes; |
185 | static unsigned long sum_online; | ||
186 | static int min_online = -1; | ||
187 | static int max_online; | ||
181 | static long n_barrier_attempts; | 188 | static long n_barrier_attempts; |
182 | static long n_barrier_successes; | 189 | static long n_barrier_successes; |
183 | static struct list_head rcu_torture_removed; | 190 | static struct list_head rcu_torture_removed; |
@@ -235,7 +242,7 @@ rcutorture_shutdown_notify(struct notifier_block *unused1, | |||
235 | if (fullstop == FULLSTOP_DONTSTOP) | 242 | if (fullstop == FULLSTOP_DONTSTOP) |
236 | fullstop = FULLSTOP_SHUTDOWN; | 243 | fullstop = FULLSTOP_SHUTDOWN; |
237 | else | 244 | else |
238 | printk(KERN_WARNING /* but going down anyway, so... */ | 245 | pr_warn(/* but going down anyway, so... */ |
239 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | 246 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); |
240 | mutex_unlock(&fullstop_mutex); | 247 | mutex_unlock(&fullstop_mutex); |
241 | return NOTIFY_DONE; | 248 | return NOTIFY_DONE; |
@@ -248,7 +255,7 @@ rcutorture_shutdown_notify(struct notifier_block *unused1, | |||
248 | static void rcutorture_shutdown_absorb(char *title) | 255 | static void rcutorture_shutdown_absorb(char *title) |
249 | { | 256 | { |
250 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | 257 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { |
251 | printk(KERN_NOTICE | 258 | pr_notice( |
252 | "rcutorture thread %s parking due to system shutdown\n", | 259 | "rcutorture thread %s parking due to system shutdown\n", |
253 | title); | 260 | title); |
254 | schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); | 261 | schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); |
@@ -1214,11 +1221,13 @@ rcu_torture_printk(char *page) | |||
1214 | n_rcu_torture_boost_failure, | 1221 | n_rcu_torture_boost_failure, |
1215 | n_rcu_torture_boosts, | 1222 | n_rcu_torture_boosts, |
1216 | n_rcu_torture_timers); | 1223 | n_rcu_torture_timers); |
1217 | cnt += sprintf(&page[cnt], "onoff: %ld/%ld:%ld/%ld ", | 1224 | cnt += sprintf(&page[cnt], |
1218 | n_online_successes, | 1225 | "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ", |
1219 | n_online_attempts, | 1226 | n_online_successes, n_online_attempts, |
1220 | n_offline_successes, | 1227 | n_offline_successes, n_offline_attempts, |
1221 | n_offline_attempts); | 1228 | min_online, max_online, |
1229 | min_offline, max_offline, | ||
1230 | sum_online, sum_offline, HZ); | ||
1222 | cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld", | 1231 | cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld", |
1223 | n_barrier_successes, | 1232 | n_barrier_successes, |
1224 | n_barrier_attempts, | 1233 | n_barrier_attempts, |
@@ -1267,7 +1276,7 @@ rcu_torture_stats_print(void) | |||
1267 | int cnt; | 1276 | int cnt; |
1268 | 1277 | ||
1269 | cnt = rcu_torture_printk(printk_buf); | 1278 | cnt = rcu_torture_printk(printk_buf); |
1270 | printk(KERN_ALERT "%s", printk_buf); | 1279 | pr_alert("%s", printk_buf); |
1271 | } | 1280 | } |
1272 | 1281 | ||
1273 | /* | 1282 | /* |
@@ -1380,20 +1389,20 @@ rcu_torture_stutter(void *arg) | |||
1380 | static inline void | 1389 | static inline void |
1381 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag) | 1390 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag) |
1382 | { | 1391 | { |
1383 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1392 | pr_alert("%s" TORTURE_FLAG |
1384 | "--- %s: nreaders=%d nfakewriters=%d " | 1393 | "--- %s: nreaders=%d nfakewriters=%d " |
1385 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " | 1394 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
1386 | "shuffle_interval=%d stutter=%d irqreader=%d " | 1395 | "shuffle_interval=%d stutter=%d irqreader=%d " |
1387 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " | 1396 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " |
1388 | "test_boost=%d/%d test_boost_interval=%d " | 1397 | "test_boost=%d/%d test_boost_interval=%d " |
1389 | "test_boost_duration=%d shutdown_secs=%d " | 1398 | "test_boost_duration=%d shutdown_secs=%d " |
1390 | "onoff_interval=%d onoff_holdoff=%d\n", | 1399 | "onoff_interval=%d onoff_holdoff=%d\n", |
1391 | torture_type, tag, nrealreaders, nfakewriters, | 1400 | torture_type, tag, nrealreaders, nfakewriters, |
1392 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, | 1401 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
1393 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, | 1402 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, |
1394 | test_boost, cur_ops->can_boost, | 1403 | test_boost, cur_ops->can_boost, |
1395 | test_boost_interval, test_boost_duration, shutdown_secs, | 1404 | test_boost_interval, test_boost_duration, shutdown_secs, |
1396 | onoff_interval, onoff_holdoff); | 1405 | onoff_interval, onoff_holdoff); |
1397 | } | 1406 | } |
1398 | 1407 | ||
1399 | static struct notifier_block rcutorture_shutdown_nb = { | 1408 | static struct notifier_block rcutorture_shutdown_nb = { |
@@ -1460,9 +1469,9 @@ rcu_torture_shutdown(void *arg) | |||
1460 | !kthread_should_stop()) { | 1469 | !kthread_should_stop()) { |
1461 | delta = shutdown_time - jiffies_snap; | 1470 | delta = shutdown_time - jiffies_snap; |
1462 | if (verbose) | 1471 | if (verbose) |
1463 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1472 | pr_alert("%s" TORTURE_FLAG |
1464 | "rcu_torture_shutdown task: %lu jiffies remaining\n", | 1473 | "rcu_torture_shutdown task: %lu jiffies remaining\n", |
1465 | torture_type, delta); | 1474 | torture_type, delta); |
1466 | schedule_timeout_interruptible(delta); | 1475 | schedule_timeout_interruptible(delta); |
1467 | jiffies_snap = ACCESS_ONCE(jiffies); | 1476 | jiffies_snap = ACCESS_ONCE(jiffies); |
1468 | } | 1477 | } |
@@ -1490,8 +1499,10 @@ static int __cpuinit | |||
1490 | rcu_torture_onoff(void *arg) | 1499 | rcu_torture_onoff(void *arg) |
1491 | { | 1500 | { |
1492 | int cpu; | 1501 | int cpu; |
1502 | unsigned long delta; | ||
1493 | int maxcpu = -1; | 1503 | int maxcpu = -1; |
1494 | DEFINE_RCU_RANDOM(rand); | 1504 | DEFINE_RCU_RANDOM(rand); |
1505 | unsigned long starttime; | ||
1495 | 1506 | ||
1496 | VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); | 1507 | VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); |
1497 | for_each_online_cpu(cpu) | 1508 | for_each_online_cpu(cpu) |
@@ -1506,29 +1517,51 @@ rcu_torture_onoff(void *arg) | |||
1506 | cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); | 1517 | cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); |
1507 | if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { | 1518 | if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { |
1508 | if (verbose) | 1519 | if (verbose) |
1509 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1520 | pr_alert("%s" TORTURE_FLAG |
1510 | "rcu_torture_onoff task: offlining %d\n", | 1521 | "rcu_torture_onoff task: offlining %d\n", |
1511 | torture_type, cpu); | 1522 | torture_type, cpu); |
1523 | starttime = jiffies; | ||
1512 | n_offline_attempts++; | 1524 | n_offline_attempts++; |
1513 | if (cpu_down(cpu) == 0) { | 1525 | if (cpu_down(cpu) == 0) { |
1514 | if (verbose) | 1526 | if (verbose) |
1515 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1527 | pr_alert("%s" TORTURE_FLAG |
1516 | "rcu_torture_onoff task: offlined %d\n", | 1528 | "rcu_torture_onoff task: offlined %d\n", |
1517 | torture_type, cpu); | 1529 | torture_type, cpu); |
1518 | n_offline_successes++; | 1530 | n_offline_successes++; |
1531 | delta = jiffies - starttime; | ||
1532 | sum_offline += delta; | ||
1533 | if (min_offline < 0) { | ||
1534 | min_offline = delta; | ||
1535 | max_offline = delta; | ||
1536 | } | ||
1537 | if (min_offline > delta) | ||
1538 | min_offline = delta; | ||
1539 | if (max_offline < delta) | ||
1540 | max_offline = delta; | ||
1519 | } | 1541 | } |
1520 | } else if (cpu_is_hotpluggable(cpu)) { | 1542 | } else if (cpu_is_hotpluggable(cpu)) { |
1521 | if (verbose) | 1543 | if (verbose) |
1522 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1544 | pr_alert("%s" TORTURE_FLAG |
1523 | "rcu_torture_onoff task: onlining %d\n", | 1545 | "rcu_torture_onoff task: onlining %d\n", |
1524 | torture_type, cpu); | 1546 | torture_type, cpu); |
1547 | starttime = jiffies; | ||
1525 | n_online_attempts++; | 1548 | n_online_attempts++; |
1526 | if (cpu_up(cpu) == 0) { | 1549 | if (cpu_up(cpu) == 0) { |
1527 | if (verbose) | 1550 | if (verbose) |
1528 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1551 | pr_alert("%s" TORTURE_FLAG |
1529 | "rcu_torture_onoff task: onlined %d\n", | 1552 | "rcu_torture_onoff task: onlined %d\n", |
1530 | torture_type, cpu); | 1553 | torture_type, cpu); |
1531 | n_online_successes++; | 1554 | n_online_successes++; |
1555 | delta = jiffies - starttime; | ||
1556 | sum_online += delta; | ||
1557 | if (min_online < 0) { | ||
1558 | min_online = delta; | ||
1559 | max_online = delta; | ||
1560 | } | ||
1561 | if (min_online > delta) | ||
1562 | min_online = delta; | ||
1563 | if (max_online < delta) | ||
1564 | max_online = delta; | ||
1532 | } | 1565 | } |
1533 | } | 1566 | } |
1534 | schedule_timeout_interruptible(onoff_interval * HZ); | 1567 | schedule_timeout_interruptible(onoff_interval * HZ); |
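The hunk above brackets each cpu_down()/cpu_up() attempt with a jiffies timestamp and folds the elapsed time into sum/min/max counters, with the minimum seeded to -1 until the first sample arrives. The standalone C sketch below shows only that bookkeeping pattern; the struct, sample values, and printf are illustrative stand-ins, not rcutorture code.

#include <stdio.h>

/* Running statistics in the style of sum_offline/min_offline/max_offline. */
struct duration_stats {
	unsigned long sum;	/* total duration across successful attempts */
	long min;		/* -1 until the first sample arrives */
	long max;
};

static void record_duration(struct duration_stats *s, unsigned long delta)
{
	s->sum += delta;
	if (s->min < 0) {	/* first sample seeds both bounds */
		s->min = delta;
		s->max = delta;
	}
	if (s->min > (long)delta)
		s->min = delta;
	if (s->max < (long)delta)
		s->max = delta;
}

int main(void)
{
	struct duration_stats offline = { .sum = 0, .min = -1, .max = 0 };
	unsigned long samples[] = { 12, 3, 27, 9 };	/* pretend jiffies deltas */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		record_duration(&offline, samples[i]);
	printf("min=%ld max=%ld sum=%lu\n", offline.min, offline.max, offline.sum);
	return 0;
}

The same min/max/sum triple is what the stats hunk earlier in this file prints as "onoff: .../... min,max:min,max sum:sum (HZ=...)".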
@@ -1593,14 +1626,14 @@ static int __cpuinit rcu_torture_stall(void *args) | |||
1593 | if (!kthread_should_stop()) { | 1626 | if (!kthread_should_stop()) { |
1594 | stop_at = get_seconds() + stall_cpu; | 1627 | stop_at = get_seconds() + stall_cpu; |
1595 | /* RCU CPU stall is expected behavior in following code. */ | 1628 | /* RCU CPU stall is expected behavior in following code. */ |
1596 | printk(KERN_ALERT "rcu_torture_stall start.\n"); | 1629 | pr_alert("rcu_torture_stall start.\n"); |
1597 | rcu_read_lock(); | 1630 | rcu_read_lock(); |
1598 | preempt_disable(); | 1631 | preempt_disable(); |
1599 | while (ULONG_CMP_LT(get_seconds(), stop_at)) | 1632 | while (ULONG_CMP_LT(get_seconds(), stop_at)) |
1600 | continue; /* Induce RCU CPU stall warning. */ | 1633 | continue; /* Induce RCU CPU stall warning. */ |
1601 | preempt_enable(); | 1634 | preempt_enable(); |
1602 | rcu_read_unlock(); | 1635 | rcu_read_unlock(); |
1603 | printk(KERN_ALERT "rcu_torture_stall end.\n"); | 1636 | pr_alert("rcu_torture_stall end.\n"); |
1604 | } | 1637 | } |
1605 | rcutorture_shutdown_absorb("rcu_torture_stall"); | 1638 | rcutorture_shutdown_absorb("rcu_torture_stall"); |
1606 | while (!kthread_should_stop()) | 1639 | while (!kthread_should_stop()) |
@@ -1716,12 +1749,12 @@ static int rcu_torture_barrier_init(void) | |||
1716 | if (n_barrier_cbs == 0) | 1749 | if (n_barrier_cbs == 0) |
1717 | return 0; | 1750 | return 0; |
1718 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { | 1751 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { |
1719 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1752 | pr_alert("%s" TORTURE_FLAG |
1720 | " Call or barrier ops missing for %s,\n", | 1753 | " Call or barrier ops missing for %s,\n", |
1721 | torture_type, cur_ops->name); | 1754 | torture_type, cur_ops->name); |
1722 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1755 | pr_alert("%s" TORTURE_FLAG |
1723 | " RCU barrier testing omitted from run.\n", | 1756 | " RCU barrier testing omitted from run.\n", |
1724 | torture_type); | 1757 | torture_type); |
1725 | return 0; | 1758 | return 0; |
1726 | } | 1759 | } |
1727 | atomic_set(&barrier_cbs_count, 0); | 1760 | atomic_set(&barrier_cbs_count, 0); |
@@ -1814,7 +1847,7 @@ rcu_torture_cleanup(void) | |||
1814 | mutex_lock(&fullstop_mutex); | 1847 | mutex_lock(&fullstop_mutex); |
1815 | rcutorture_record_test_transition(); | 1848 | rcutorture_record_test_transition(); |
1816 | if (fullstop == FULLSTOP_SHUTDOWN) { | 1849 | if (fullstop == FULLSTOP_SHUTDOWN) { |
1817 | printk(KERN_WARNING /* but going down anyway, so... */ | 1850 | pr_warn(/* but going down anyway, so... */ |
1818 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | 1851 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); |
1819 | mutex_unlock(&fullstop_mutex); | 1852 | mutex_unlock(&fullstop_mutex); |
1820 | schedule_timeout_uninterruptible(10); | 1853 | schedule_timeout_uninterruptible(10); |
@@ -1938,17 +1971,17 @@ rcu_torture_init(void) | |||
1938 | break; | 1971 | break; |
1939 | } | 1972 | } |
1940 | if (i == ARRAY_SIZE(torture_ops)) { | 1973 | if (i == ARRAY_SIZE(torture_ops)) { |
1941 | printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n", | 1974 | pr_alert("rcu-torture: invalid torture type: \"%s\"\n", |
1942 | torture_type); | 1975 | torture_type); |
1943 | printk(KERN_ALERT "rcu-torture types:"); | 1976 | pr_alert("rcu-torture types:"); |
1944 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) | 1977 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
1945 | printk(KERN_ALERT " %s", torture_ops[i]->name); | 1978 | pr_alert(" %s", torture_ops[i]->name); |
1946 | printk(KERN_ALERT "\n"); | 1979 | pr_alert("\n"); |
1947 | mutex_unlock(&fullstop_mutex); | 1980 | mutex_unlock(&fullstop_mutex); |
1948 | return -EINVAL; | 1981 | return -EINVAL; |
1949 | } | 1982 | } |
1950 | if (cur_ops->fqs == NULL && fqs_duration != 0) { | 1983 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
1951 | printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); | 1984 | pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); |
1952 | fqs_duration = 0; | 1985 | fqs_duration = 0; |
1953 | } | 1986 | } |
1954 | if (cur_ops->init) | 1987 | if (cur_ops->init) |
@@ -1996,14 +2029,15 @@ rcu_torture_init(void) | |||
1996 | /* Start up the kthreads. */ | 2029 | /* Start up the kthreads. */ |
1997 | 2030 | ||
1998 | VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); | 2031 | VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); |
1999 | writer_task = kthread_run(rcu_torture_writer, NULL, | 2032 | writer_task = kthread_create(rcu_torture_writer, NULL, |
2000 | "rcu_torture_writer"); | 2033 | "rcu_torture_writer"); |
2001 | if (IS_ERR(writer_task)) { | 2034 | if (IS_ERR(writer_task)) { |
2002 | firsterr = PTR_ERR(writer_task); | 2035 | firsterr = PTR_ERR(writer_task); |
2003 | VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); | 2036 | VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); |
2004 | writer_task = NULL; | 2037 | writer_task = NULL; |
2005 | goto unwind; | 2038 | goto unwind; |
2006 | } | 2039 | } |
2040 | wake_up_process(writer_task); | ||
2007 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), | 2041 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), |
2008 | GFP_KERNEL); | 2042 | GFP_KERNEL); |
2009 | if (fakewriter_tasks == NULL) { | 2043 | if (fakewriter_tasks == NULL) { |
@@ -2118,14 +2152,15 @@ rcu_torture_init(void) | |||
2118 | } | 2152 | } |
2119 | if (shutdown_secs > 0) { | 2153 | if (shutdown_secs > 0) { |
2120 | shutdown_time = jiffies + shutdown_secs * HZ; | 2154 | shutdown_time = jiffies + shutdown_secs * HZ; |
2121 | shutdown_task = kthread_run(rcu_torture_shutdown, NULL, | 2155 | shutdown_task = kthread_create(rcu_torture_shutdown, NULL, |
2122 | "rcu_torture_shutdown"); | 2156 | "rcu_torture_shutdown"); |
2123 | if (IS_ERR(shutdown_task)) { | 2157 | if (IS_ERR(shutdown_task)) { |
2124 | firsterr = PTR_ERR(shutdown_task); | 2158 | firsterr = PTR_ERR(shutdown_task); |
2125 | VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); | 2159 | VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); |
2126 | shutdown_task = NULL; | 2160 | shutdown_task = NULL; |
2127 | goto unwind; | 2161 | goto unwind; |
2128 | } | 2162 | } |
2163 | wake_up_process(shutdown_task); | ||
2129 | } | 2164 | } |
2130 | i = rcu_torture_onoff_init(); | 2165 | i = rcu_torture_onoff_init(); |
2131 | if (i != 0) { | 2166 | if (i != 0) { |
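The writer and shutdown threads in this file switch from kthread_run() to kthread_create() followed by an explicit wake_up_process(), so the task does not start running until after the error path has been handled. A schematic module-style sketch of that two-step pattern follows; demo_thread_fn and demo_start are made-up names, and only the kthread API calls are real.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *demo_task;

/* Hypothetical worker; stands in for a thread such as rcu_torture_writer. */
static int demo_thread_fn(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int demo_start(void)
{
	/* Create the thread stopped, so failures can be handled first... */
	demo_task = kthread_create(demo_thread_fn, NULL, "demo_thread");
	if (IS_ERR(demo_task)) {
		int err = PTR_ERR(demo_task);

		demo_task = NULL;
		return err;
	}
	/* ...then let it run: the two calls together replace kthread_run(). */
	wake_up_process(demo_task);
	return 0;
}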
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index f280e542e3e9..4fb2376ddf06 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/prefetch.h> | 52 | #include <linux/prefetch.h> |
53 | #include <linux/delay.h> | 53 | #include <linux/delay.h> |
54 | #include <linux/stop_machine.h> | 54 | #include <linux/stop_machine.h> |
55 | #include <linux/random.h> | ||
55 | 56 | ||
56 | #include "rcutree.h" | 57 | #include "rcutree.h" |
57 | #include <trace/events/rcu.h> | 58 | #include <trace/events/rcu.h> |
@@ -61,6 +62,7 @@ | |||
61 | /* Data structures. */ | 62 | /* Data structures. */ |
62 | 63 | ||
63 | static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; | 64 | static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; |
65 | static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; | ||
64 | 66 | ||
65 | #define RCU_STATE_INITIALIZER(sname, cr) { \ | 67 | #define RCU_STATE_INITIALIZER(sname, cr) { \ |
66 | .level = { &sname##_state.node[0] }, \ | 68 | .level = { &sname##_state.node[0] }, \ |
@@ -72,7 +74,6 @@ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; | |||
72 | .orphan_nxttail = &sname##_state.orphan_nxtlist, \ | 74 | .orphan_nxttail = &sname##_state.orphan_nxtlist, \ |
73 | .orphan_donetail = &sname##_state.orphan_donelist, \ | 75 | .orphan_donetail = &sname##_state.orphan_donelist, \ |
74 | .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ | 76 | .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ |
75 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \ | ||
76 | .name = #sname, \ | 77 | .name = #sname, \ |
77 | } | 78 | } |
78 | 79 | ||
@@ -88,7 +89,7 @@ LIST_HEAD(rcu_struct_flavors); | |||
88 | 89 | ||
89 | /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ | 90 | /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ |
90 | static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; | 91 | static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; |
91 | module_param(rcu_fanout_leaf, int, 0); | 92 | module_param(rcu_fanout_leaf, int, 0444); |
92 | int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; | 93 | int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; |
93 | static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */ | 94 | static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */ |
94 | NUM_RCU_LVL_0, | 95 | NUM_RCU_LVL_0, |
@@ -133,13 +134,12 @@ static int rcu_scheduler_fully_active __read_mostly; | |||
133 | */ | 134 | */ |
134 | static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); | 135 | static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); |
135 | DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); | 136 | DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); |
136 | DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu); | ||
137 | DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); | 137 | DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); |
138 | DEFINE_PER_CPU(char, rcu_cpu_has_work); | 138 | DEFINE_PER_CPU(char, rcu_cpu_has_work); |
139 | 139 | ||
140 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 140 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
141 | 141 | ||
142 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); | 142 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); |
143 | static void invoke_rcu_core(void); | 143 | static void invoke_rcu_core(void); |
144 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | 144 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); |
145 | 145 | ||
@@ -175,8 +175,6 @@ void rcu_sched_qs(int cpu) | |||
175 | { | 175 | { |
176 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); | 176 | struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); |
177 | 177 | ||
178 | rdp->passed_quiesce_gpnum = rdp->gpnum; | ||
179 | barrier(); | ||
180 | if (rdp->passed_quiesce == 0) | 178 | if (rdp->passed_quiesce == 0) |
181 | trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs"); | 179 | trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs"); |
182 | rdp->passed_quiesce = 1; | 180 | rdp->passed_quiesce = 1; |
@@ -186,8 +184,6 @@ void rcu_bh_qs(int cpu) | |||
186 | { | 184 | { |
187 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | 185 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); |
188 | 186 | ||
189 | rdp->passed_quiesce_gpnum = rdp->gpnum; | ||
190 | barrier(); | ||
191 | if (rdp->passed_quiesce == 0) | 187 | if (rdp->passed_quiesce == 0) |
192 | trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); | 188 | trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); |
193 | rdp->passed_quiesce = 1; | 189 | rdp->passed_quiesce = 1; |
@@ -210,15 +206,18 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch); | |||
210 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 206 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
211 | .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, | 207 | .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, |
212 | .dynticks = ATOMIC_INIT(1), | 208 | .dynticks = ATOMIC_INIT(1), |
209 | #if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE) | ||
210 | .ignore_user_qs = true, | ||
211 | #endif | ||
213 | }; | 212 | }; |
214 | 213 | ||
215 | static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */ | 214 | static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */ |
216 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ | 215 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ |
217 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ | 216 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ |
218 | 217 | ||
219 | module_param(blimit, int, 0); | 218 | module_param(blimit, int, 0444); |
220 | module_param(qhimark, int, 0); | 219 | module_param(qhimark, int, 0444); |
221 | module_param(qlowmark, int, 0); | 220 | module_param(qlowmark, int, 0444); |
222 | 221 | ||
223 | int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ | 222 | int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */ |
224 | int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; | 223 | int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; |
@@ -226,7 +225,14 @@ int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; | |||
226 | module_param(rcu_cpu_stall_suppress, int, 0644); | 225 | module_param(rcu_cpu_stall_suppress, int, 0644); |
227 | module_param(rcu_cpu_stall_timeout, int, 0644); | 226 | module_param(rcu_cpu_stall_timeout, int, 0644); |
228 | 227 | ||
229 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | 228 | static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS; |
229 | static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS; | ||
230 | |||
231 | module_param(jiffies_till_first_fqs, ulong, 0644); | ||
232 | module_param(jiffies_till_next_fqs, ulong, 0644); | ||
233 | |||
234 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)); | ||
235 | static void force_quiescent_state(struct rcu_state *rsp); | ||
230 | static int rcu_pending(int cpu); | 236 | static int rcu_pending(int cpu); |
231 | 237 | ||
232 | /* | 238 | /* |
@@ -252,7 +258,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | |||
252 | */ | 258 | */ |
253 | void rcu_bh_force_quiescent_state(void) | 259 | void rcu_bh_force_quiescent_state(void) |
254 | { | 260 | { |
255 | force_quiescent_state(&rcu_bh_state, 0); | 261 | force_quiescent_state(&rcu_bh_state); |
256 | } | 262 | } |
257 | EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); | 263 | EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); |
258 | 264 | ||
@@ -286,7 +292,7 @@ EXPORT_SYMBOL_GPL(rcutorture_record_progress); | |||
286 | */ | 292 | */ |
287 | void rcu_sched_force_quiescent_state(void) | 293 | void rcu_sched_force_quiescent_state(void) |
288 | { | 294 | { |
289 | force_quiescent_state(&rcu_sched_state, 0); | 295 | force_quiescent_state(&rcu_sched_state); |
290 | } | 296 | } |
291 | EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); | 297 | EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); |
292 | 298 | ||
@@ -305,7 +311,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) | |||
305 | static int | 311 | static int |
306 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | 312 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) |
307 | { | 313 | { |
308 | return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); | 314 | return *rdp->nxttail[RCU_DONE_TAIL + |
315 | ACCESS_ONCE(rsp->completed) != rdp->completed] && | ||
316 | !rcu_gp_in_progress(rsp); | ||
309 | } | 317 | } |
310 | 318 | ||
311 | /* | 319 | /* |
@@ -317,45 +325,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) | |||
317 | } | 325 | } |
318 | 326 | ||
319 | /* | 327 | /* |
320 | * If the specified CPU is offline, tell the caller that it is in | 328 | * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state |
321 | * a quiescent state. Otherwise, whack it with a reschedule IPI. | ||
322 | * Grace periods can end up waiting on an offline CPU when that | ||
323 | * CPU is in the process of coming online -- it will be added to the | ||
324 | * rcu_node bitmasks before it actually makes it online. The same thing | ||
325 | * can happen while a CPU is in the process of coming online. Because this | ||
326 | * race is quite rare, we check for it after detecting that the grace | ||
327 | * period has been delayed rather than checking each and every CPU | ||
328 | * each and every time we start a new grace period. | ||
329 | */ | ||
330 | static int rcu_implicit_offline_qs(struct rcu_data *rdp) | ||
331 | { | ||
332 | /* | ||
333 | * If the CPU is offline for more than a jiffy, it is in a quiescent | ||
334 | * state. We can trust its state not to change because interrupts | ||
335 | * are disabled. The reason for the jiffy's worth of slack is to | ||
336 | * handle CPUs initializing on the way up and finding their way | ||
337 | * to the idle loop on the way down. | ||
338 | */ | ||
339 | if (cpu_is_offline(rdp->cpu) && | ||
340 | ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) { | ||
341 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); | ||
342 | rdp->offline_fqs++; | ||
343 | return 1; | ||
344 | } | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle | ||
350 | * | 329 | * |
351 | * If the new value of the ->dynticks_nesting counter now is zero, | 330 | * If the new value of the ->dynticks_nesting counter now is zero, |
352 | * we really have entered idle, and must do the appropriate accounting. | 331 | * we really have entered idle, and must do the appropriate accounting. |
353 | * The caller must have disabled interrupts. | 332 | * The caller must have disabled interrupts. |
354 | */ | 333 | */ |
355 | static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) | 334 | static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, |
335 | bool user) | ||
356 | { | 336 | { |
357 | trace_rcu_dyntick("Start", oldval, 0); | 337 | trace_rcu_dyntick("Start", oldval, 0); |
358 | if (!is_idle_task(current)) { | 338 | if (!user && !is_idle_task(current)) { |
359 | struct task_struct *idle = idle_task(smp_processor_id()); | 339 | struct task_struct *idle = idle_task(smp_processor_id()); |
360 | 340 | ||
361 | trace_rcu_dyntick("Error on entry: not idle task", oldval, 0); | 341 | trace_rcu_dyntick("Error on entry: not idle task", oldval, 0); |
@@ -372,7 +352,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) | |||
372 | WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); | 352 | WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); |
373 | 353 | ||
374 | /* | 354 | /* |
375 | * The idle task is not permitted to enter the idle loop while | 355 | * It is illegal to enter an extended quiescent state while |
376 | * in an RCU read-side critical section. | 356 | * in an RCU read-side critical section. |
377 | */ | 357 | */ |
378 | rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), | 358 | rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), |
@@ -383,6 +363,25 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) | |||
383 | "Illegal idle entry in RCU-sched read-side critical section."); | 363 | "Illegal idle entry in RCU-sched read-side critical section."); |
384 | } | 364 | } |
385 | 365 | ||
366 | /* | ||
367 | * Enter an RCU extended quiescent state, which can be either the | ||
368 | * idle loop or adaptive-tickless usermode execution. | ||
369 | */ | ||
370 | static void rcu_eqs_enter(bool user) | ||
371 | { | ||
372 | long long oldval; | ||
373 | struct rcu_dynticks *rdtp; | ||
374 | |||
375 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
376 | oldval = rdtp->dynticks_nesting; | ||
377 | WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); | ||
378 | if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) | ||
379 | rdtp->dynticks_nesting = 0; | ||
380 | else | ||
381 | rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; | ||
382 | rcu_eqs_enter_common(rdtp, oldval, user); | ||
383 | } | ||
384 | |||
386 | /** | 385 | /** |
387 | * rcu_idle_enter - inform RCU that current CPU is entering idle | 386 | * rcu_idle_enter - inform RCU that current CPU is entering idle |
388 | * | 387 | * |
@@ -398,21 +397,70 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) | |||
398 | void rcu_idle_enter(void) | 397 | void rcu_idle_enter(void) |
399 | { | 398 | { |
400 | unsigned long flags; | 399 | unsigned long flags; |
401 | long long oldval; | 400 | |
401 | local_irq_save(flags); | ||
402 | rcu_eqs_enter(false); | ||
403 | local_irq_restore(flags); | ||
404 | } | ||
405 | EXPORT_SYMBOL_GPL(rcu_idle_enter); | ||
406 | |||
407 | #ifdef CONFIG_RCU_USER_QS | ||
408 | /** | ||
409 | * rcu_user_enter - inform RCU that we are resuming userspace. | ||
410 | * | ||
411 | * Enter RCU idle mode right before resuming userspace. No use of RCU | ||
412 | * is permitted between this call and rcu_user_exit(). This way the | ||
413 | * CPU doesn't need to maintain the tick for RCU maintenance purposes | ||
414 | * when the CPU runs in userspace. | ||
415 | */ | ||
416 | void rcu_user_enter(void) | ||
417 | { | ||
418 | unsigned long flags; | ||
402 | struct rcu_dynticks *rdtp; | 419 | struct rcu_dynticks *rdtp; |
403 | 420 | ||
421 | /* | ||
422 | * Some contexts may involve an exception occurring in an irq, | ||
423 | * leading to that nesting: | ||
424 | * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() | ||
425 | * This would mess up the dyntick_nesting count though. And rcu_irq_*() | ||
426 | * helpers are enough to protect RCU uses inside the exception. So | ||
427 | * just return immediately if we detect we are in an IRQ. | ||
428 | */ | ||
429 | if (in_interrupt()) | ||
430 | return; | ||
431 | |||
432 | WARN_ON_ONCE(!current->mm); | ||
433 | |||
404 | local_irq_save(flags); | 434 | local_irq_save(flags); |
405 | rdtp = &__get_cpu_var(rcu_dynticks); | 435 | rdtp = &__get_cpu_var(rcu_dynticks); |
406 | oldval = rdtp->dynticks_nesting; | 436 | if (!rdtp->ignore_user_qs && !rdtp->in_user) { |
407 | WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); | 437 | rdtp->in_user = true; |
408 | if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) | 438 | rcu_eqs_enter(true); |
409 | rdtp->dynticks_nesting = 0; | 439 | } |
410 | else | ||
411 | rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; | ||
412 | rcu_idle_enter_common(rdtp, oldval); | ||
413 | local_irq_restore(flags); | 440 | local_irq_restore(flags); |
414 | } | 441 | } |
415 | EXPORT_SYMBOL_GPL(rcu_idle_enter); | 442 | |
443 | /** | ||
444 | * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace | ||
445 | * after the current irq returns. | ||
446 | * | ||
447 | * This is similar to rcu_user_enter() but in the context of a non-nesting | ||
448 | * irq. After this call, RCU enters into idle mode when the interrupt | ||
449 | * returns. | ||
450 | */ | ||
451 | void rcu_user_enter_after_irq(void) | ||
452 | { | ||
453 | unsigned long flags; | ||
454 | struct rcu_dynticks *rdtp; | ||
455 | |||
456 | local_irq_save(flags); | ||
457 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
458 | /* Ensure this irq is interrupting a non-idle RCU state. */ | ||
459 | WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK)); | ||
460 | rdtp->dynticks_nesting = 1; | ||
461 | local_irq_restore(flags); | ||
462 | } | ||
463 | #endif /* CONFIG_RCU_USER_QS */ | ||
416 | 464 | ||
417 | /** | 465 | /** |
418 | * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle | 466 | * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle |
@@ -444,18 +492,19 @@ void rcu_irq_exit(void) | |||
444 | if (rdtp->dynticks_nesting) | 492 | if (rdtp->dynticks_nesting) |
445 | trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); | 493 | trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); |
446 | else | 494 | else |
447 | rcu_idle_enter_common(rdtp, oldval); | 495 | rcu_eqs_enter_common(rdtp, oldval, true); |
448 | local_irq_restore(flags); | 496 | local_irq_restore(flags); |
449 | } | 497 | } |
450 | 498 | ||
451 | /* | 499 | /* |
452 | * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle | 500 | * rcu_eqs_exit_common - current CPU moving away from extended quiescent state |
453 | * | 501 | * |
454 | * If the new value of the ->dynticks_nesting counter was previously zero, | 502 | * If the new value of the ->dynticks_nesting counter was previously zero, |
455 | * we really have exited idle, and must do the appropriate accounting. | 503 | * we really have exited idle, and must do the appropriate accounting. |
456 | * The caller must have disabled interrupts. | 504 | * The caller must have disabled interrupts. |
457 | */ | 505 | */ |
458 | static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) | 506 | static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, |
507 | int user) | ||
459 | { | 508 | { |
460 | smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ | 509 | smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ |
461 | atomic_inc(&rdtp->dynticks); | 510 | atomic_inc(&rdtp->dynticks); |
@@ -464,7 +513,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) | |||
464 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); | 513 | WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); |
465 | rcu_cleanup_after_idle(smp_processor_id()); | 514 | rcu_cleanup_after_idle(smp_processor_id()); |
466 | trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting); | 515 | trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting); |
467 | if (!is_idle_task(current)) { | 516 | if (!user && !is_idle_task(current)) { |
468 | struct task_struct *idle = idle_task(smp_processor_id()); | 517 | struct task_struct *idle = idle_task(smp_processor_id()); |
469 | 518 | ||
470 | trace_rcu_dyntick("Error on exit: not idle task", | 519 | trace_rcu_dyntick("Error on exit: not idle task", |
@@ -476,6 +525,25 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) | |||
476 | } | 525 | } |
477 | } | 526 | } |
478 | 527 | ||
528 | /* | ||
529 | * Exit an RCU extended quiescent state, which can be either the | ||
530 | * idle loop or adaptive-tickless usermode execution. | ||
531 | */ | ||
532 | static void rcu_eqs_exit(bool user) | ||
533 | { | ||
534 | struct rcu_dynticks *rdtp; | ||
535 | long long oldval; | ||
536 | |||
537 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
538 | oldval = rdtp->dynticks_nesting; | ||
539 | WARN_ON_ONCE(oldval < 0); | ||
540 | if (oldval & DYNTICK_TASK_NEST_MASK) | ||
541 | rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; | ||
542 | else | ||
543 | rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; | ||
544 | rcu_eqs_exit_common(rdtp, oldval, user); | ||
545 | } | ||
546 | |||
479 | /** | 547 | /** |
480 | * rcu_idle_exit - inform RCU that current CPU is leaving idle | 548 | * rcu_idle_exit - inform RCU that current CPU is leaving idle |
481 | * | 549 | * |
@@ -490,21 +558,67 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) | |||
490 | void rcu_idle_exit(void) | 558 | void rcu_idle_exit(void) |
491 | { | 559 | { |
492 | unsigned long flags; | 560 | unsigned long flags; |
561 | |||
562 | local_irq_save(flags); | ||
563 | rcu_eqs_exit(false); | ||
564 | local_irq_restore(flags); | ||
565 | } | ||
566 | EXPORT_SYMBOL_GPL(rcu_idle_exit); | ||
567 | |||
568 | #ifdef CONFIG_RCU_USER_QS | ||
569 | /** | ||
570 | * rcu_user_exit - inform RCU that we are exiting userspace. | ||
571 | * | ||
572 | * Exit RCU idle mode while entering the kernel because it can | ||
573 | * run an RCU read side critical section anytime. | ||
574 | */ | ||
575 | void rcu_user_exit(void) | ||
576 | { | ||
577 | unsigned long flags; | ||
493 | struct rcu_dynticks *rdtp; | 578 | struct rcu_dynticks *rdtp; |
494 | long long oldval; | 579 | |
580 | /* | ||
581 | * Some contexts may involve an exception occurring in an irq, | ||
582 | * leading to that nesting: | ||
583 | * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() | ||
584 | * This would mess up the dyntick_nesting count though. And rcu_irq_*() | ||
585 | * helpers are enough to protect RCU uses inside the exception. So | ||
586 | * just return immediately if we detect we are in an IRQ. | ||
587 | */ | ||
588 | if (in_interrupt()) | ||
589 | return; | ||
495 | 590 | ||
496 | local_irq_save(flags); | 591 | local_irq_save(flags); |
497 | rdtp = &__get_cpu_var(rcu_dynticks); | 592 | rdtp = &__get_cpu_var(rcu_dynticks); |
498 | oldval = rdtp->dynticks_nesting; | 593 | if (rdtp->in_user) { |
499 | WARN_ON_ONCE(oldval < 0); | 594 | rdtp->in_user = false; |
500 | if (oldval & DYNTICK_TASK_NEST_MASK) | 595 | rcu_eqs_exit(true); |
501 | rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; | 596 | } |
502 | else | ||
503 | rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; | ||
504 | rcu_idle_exit_common(rdtp, oldval); | ||
505 | local_irq_restore(flags); | 597 | local_irq_restore(flags); |
506 | } | 598 | } |
507 | EXPORT_SYMBOL_GPL(rcu_idle_exit); | 599 | |
600 | /** | ||
601 | * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace | ||
602 | * idle mode after the current non-nesting irq returns. | ||
603 | * | ||
604 | * This is similar to rcu_user_exit() but in the context of an irq. | ||
605 | * This is called when the irq has interrupted a userspace RCU idle mode | ||
606 | * context. When the current non-nesting interrupt returns after this call, | ||
607 | * the CPU won't restore the RCU idle mode. | ||
608 | */ | ||
609 | void rcu_user_exit_after_irq(void) | ||
610 | { | ||
611 | unsigned long flags; | ||
612 | struct rcu_dynticks *rdtp; | ||
613 | |||
614 | local_irq_save(flags); | ||
615 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
616 | /* Ensure we are interrupting an RCU idle mode. */ | ||
617 | WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK); | ||
618 | rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE; | ||
619 | local_irq_restore(flags); | ||
620 | } | ||
621 | #endif /* CONFIG_RCU_USER_QS */ | ||
508 | 622 | ||
509 | /** | 623 | /** |
510 | * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle | 624 | * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle |
@@ -539,7 +653,7 @@ void rcu_irq_enter(void) | |||
539 | if (oldval) | 653 | if (oldval) |
540 | trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); | 654 | trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); |
541 | else | 655 | else |
542 | rcu_idle_exit_common(rdtp, oldval); | 656 | rcu_eqs_exit_common(rdtp, oldval, true); |
543 | local_irq_restore(flags); | 657 | local_irq_restore(flags); |
544 | } | 658 | } |
545 | 659 | ||
@@ -603,6 +717,21 @@ int rcu_is_cpu_idle(void) | |||
603 | } | 717 | } |
604 | EXPORT_SYMBOL(rcu_is_cpu_idle); | 718 | EXPORT_SYMBOL(rcu_is_cpu_idle); |
605 | 719 | ||
720 | #ifdef CONFIG_RCU_USER_QS | ||
721 | void rcu_user_hooks_switch(struct task_struct *prev, | ||
722 | struct task_struct *next) | ||
723 | { | ||
724 | struct rcu_dynticks *rdtp; | ||
725 | |||
726 | /* Interrupts are disabled in context switch */ | ||
727 | rdtp = &__get_cpu_var(rcu_dynticks); | ||
728 | if (!rdtp->ignore_user_qs) { | ||
729 | clear_tsk_thread_flag(prev, TIF_NOHZ); | ||
730 | set_tsk_thread_flag(next, TIF_NOHZ); | ||
731 | } | ||
732 | } | ||
733 | #endif /* #ifdef CONFIG_RCU_USER_QS */ | ||
734 | |||
606 | #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) | 735 | #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) |
607 | 736 | ||
608 | /* | 737 | /* |
@@ -673,7 +802,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) | |||
673 | * Return true if the specified CPU has passed through a quiescent | 802 | * Return true if the specified CPU has passed through a quiescent |
674 | * state by virtue of being in or having passed through a dynticks | 803 | * state by virtue of being in or having passed through a dynticks |
675 | * idle state since the last call to dyntick_save_progress_counter() | 804 | * idle state since the last call to dyntick_save_progress_counter() |
676 | * for this same CPU. | 805 | * for this same CPU, or by virtue of having been offline. |
677 | */ | 806 | */ |
678 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | 807 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) |
679 | { | 808 | { |
@@ -697,8 +826,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
697 | return 1; | 826 | return 1; |
698 | } | 827 | } |
699 | 828 | ||
700 | /* Go check for the CPU being offline. */ | 829 | /* |
701 | return rcu_implicit_offline_qs(rdp); | 830 | * Check for the CPU being offline, but only if the grace period |
831 | * is old enough. We don't need to worry about the CPU changing | ||
832 | * state: If we see it offline even once, it has been through a | ||
833 | * quiescent state. | ||
834 | * | ||
835 | * The reason for insisting that the grace period be at least | ||
836 | * one jiffy old is that CPUs that are not quite online and that | ||
837 | * have just gone offline can still execute RCU read-side critical | ||
838 | * sections. | ||
839 | */ | ||
840 | if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies)) | ||
841 | return 0; /* Grace period is not old enough. */ | ||
842 | barrier(); | ||
843 | if (cpu_is_offline(rdp->cpu)) { | ||
844 | trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); | ||
845 | rdp->offline_fqs++; | ||
846 | return 1; | ||
847 | } | ||
848 | return 0; | ||
702 | } | 849 | } |
703 | 850 | ||
704 | static int jiffies_till_stall_check(void) | 851 | static int jiffies_till_stall_check(void) |
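The offline check added above only trusts cpu_is_offline() once the grace period is at least a jiffy old, and it uses ULONG_CMP_GE() so the jiffies comparison survives counter wraparound. The sketch below uses an assumed, paraphrased form of that macro purely to illustrate the wraparound-safe comparison; it is not the kernel's header.

#include <stdio.h>
#include <limits.h>

/*
 * Wraparound-safe "a >= b" for free-running unsigned counters, in the
 * style of the kernel's ULONG_CMP_GE(): treat a as not-before b when the
 * unsigned difference a - b lies within half the counter's range.
 * (Assumed definition, shown for illustration only.)
 */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long gp_start = ULONG_MAX - 2;	/* counter about to wrap */
	unsigned long now = 1;			/* counter already wrapped */

	/* A naive comparison claims gp_start + 2 >= now, i.e. "too recent"... */
	printf("naive: %d\n", gp_start + 2 >= now);			/* prints 1 */
	/* ...the wrap-aware form correctly reports the grace period as old. */
	printf("wrap-aware: %d\n", ULONG_CMP_GE(gp_start + 2, now));	/* prints 0 */
	return 0;
}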
@@ -755,14 +902,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
755 | rcu_for_each_leaf_node(rsp, rnp) { | 902 | rcu_for_each_leaf_node(rsp, rnp) { |
756 | raw_spin_lock_irqsave(&rnp->lock, flags); | 903 | raw_spin_lock_irqsave(&rnp->lock, flags); |
757 | ndetected += rcu_print_task_stall(rnp); | 904 | ndetected += rcu_print_task_stall(rnp); |
905 | if (rnp->qsmask != 0) { | ||
906 | for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) | ||
907 | if (rnp->qsmask & (1UL << cpu)) { | ||
908 | print_cpu_stall_info(rsp, | ||
909 | rnp->grplo + cpu); | ||
910 | ndetected++; | ||
911 | } | ||
912 | } | ||
758 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 913 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
759 | if (rnp->qsmask == 0) | ||
760 | continue; | ||
761 | for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) | ||
762 | if (rnp->qsmask & (1UL << cpu)) { | ||
763 | print_cpu_stall_info(rsp, rnp->grplo + cpu); | ||
764 | ndetected++; | ||
765 | } | ||
766 | } | 914 | } |
767 | 915 | ||
768 | /* | 916 | /* |
@@ -782,11 +930,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
782 | else if (!trigger_all_cpu_backtrace()) | 930 | else if (!trigger_all_cpu_backtrace()) |
783 | dump_stack(); | 931 | dump_stack(); |
784 | 932 | ||
785 | /* If so configured, complain about tasks blocking the grace period. */ | 933 | /* Complain about tasks blocking the grace period. */ |
786 | 934 | ||
787 | rcu_print_detail_task_stall(rsp); | 935 | rcu_print_detail_task_stall(rsp); |
788 | 936 | ||
789 | force_quiescent_state(rsp, 0); /* Kick them all. */ | 937 | force_quiescent_state(rsp); /* Kick them all. */ |
790 | } | 938 | } |
791 | 939 | ||
792 | static void print_cpu_stall(struct rcu_state *rsp) | 940 | static void print_cpu_stall(struct rcu_state *rsp) |
@@ -827,7 +975,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
827 | j = ACCESS_ONCE(jiffies); | 975 | j = ACCESS_ONCE(jiffies); |
828 | js = ACCESS_ONCE(rsp->jiffies_stall); | 976 | js = ACCESS_ONCE(rsp->jiffies_stall); |
829 | rnp = rdp->mynode; | 977 | rnp = rdp->mynode; |
830 | if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) { | 978 | if (rcu_gp_in_progress(rsp) && |
979 | (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) { | ||
831 | 980 | ||
832 | /* We haven't checked in, so go dump stack. */ | 981 | /* We haven't checked in, so go dump stack. */ |
833 | print_cpu_stall(rsp); | 982 | print_cpu_stall(rsp); |
@@ -889,12 +1038,8 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct | |||
889 | */ | 1038 | */ |
890 | rdp->gpnum = rnp->gpnum; | 1039 | rdp->gpnum = rnp->gpnum; |
891 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart"); | 1040 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart"); |
892 | if (rnp->qsmask & rdp->grpmask) { | 1041 | rdp->passed_quiesce = 0; |
893 | rdp->qs_pending = 1; | 1042 | rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); |
894 | rdp->passed_quiesce = 0; | ||
895 | } else { | ||
896 | rdp->qs_pending = 0; | ||
897 | } | ||
898 | zero_cpu_stall_ticks(rdp); | 1043 | zero_cpu_stall_ticks(rdp); |
899 | } | 1044 | } |
900 | } | 1045 | } |
@@ -974,10 +1119,13 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat | |||
974 | * our behalf. Catch up with this state to avoid noting | 1119 | * our behalf. Catch up with this state to avoid noting |
975 | * spurious new grace periods. If another grace period | 1120 | * spurious new grace periods. If another grace period |
976 | * has started, then rnp->gpnum will have advanced, so | 1121 | * has started, then rnp->gpnum will have advanced, so |
977 | * we will detect this later on. | 1122 | * we will detect this later on. Of course, any quiescent |
1123 | * states we found for the old GP are now invalid. | ||
978 | */ | 1124 | */ |
979 | if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) | 1125 | if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) { |
980 | rdp->gpnum = rdp->completed; | 1126 | rdp->gpnum = rdp->completed; |
1127 | rdp->passed_quiesce = 0; | ||
1128 | } | ||
981 | 1129 | ||
982 | /* | 1130 | /* |
983 | * If RCU does not need a quiescent state from this CPU, | 1131 | * If RCU does not need a quiescent state from this CPU, |
@@ -1021,97 +1169,56 @@ rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat | |||
1021 | /* Prior grace period ended, so advance callbacks for current CPU. */ | 1169 | /* Prior grace period ended, so advance callbacks for current CPU. */ |
1022 | __rcu_process_gp_end(rsp, rnp, rdp); | 1170 | __rcu_process_gp_end(rsp, rnp, rdp); |
1023 | 1171 | ||
1024 | /* | ||
1025 | * Because this CPU just now started the new grace period, we know | ||
1026 | * that all of its callbacks will be covered by this upcoming grace | ||
1027 | * period, even the ones that were registered arbitrarily recently. | ||
1028 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
1029 | * | ||
1030 | * Other CPUs cannot be sure exactly when the grace period started. | ||
1031 | * Therefore, their recently registered callbacks must pass through | ||
1032 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
1033 | * by the next RCU grace period. | ||
1034 | */ | ||
1035 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
1036 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
1037 | |||
1038 | /* Set state so that this CPU will detect the next quiescent state. */ | 1172 | /* Set state so that this CPU will detect the next quiescent state. */ |
1039 | __note_new_gpnum(rsp, rnp, rdp); | 1173 | __note_new_gpnum(rsp, rnp, rdp); |
1040 | } | 1174 | } |
1041 | 1175 | ||
1042 | /* | 1176 | /* |
1043 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | 1177 | * Initialize a new grace period. |
1044 | * in preparation for detecting the next grace period. The caller must hold | ||
1045 | * the root node's ->lock, which is released before return. Hard irqs must | ||
1046 | * be disabled. | ||
1047 | * | ||
1048 | * Note that it is legal for a dying CPU (which is marked as offline) to | ||
1049 | * invoke this function. This can happen when the dying CPU reports its | ||
1050 | * quiescent state. | ||
1051 | */ | 1178 | */ |
1052 | static void | 1179 | static int rcu_gp_init(struct rcu_state *rsp) |
1053 | rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | ||
1054 | __releases(rcu_get_root(rsp)->lock) | ||
1055 | { | 1180 | { |
1056 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); | 1181 | struct rcu_data *rdp; |
1057 | struct rcu_node *rnp = rcu_get_root(rsp); | 1182 | struct rcu_node *rnp = rcu_get_root(rsp); |
1058 | 1183 | ||
1059 | if (!rcu_scheduler_fully_active || | 1184 | raw_spin_lock_irq(&rnp->lock); |
1060 | !cpu_needs_another_gp(rsp, rdp)) { | 1185 | rsp->gp_flags = 0; /* Clear all flags: New grace period. */ |
1061 | /* | ||
1062 | * Either the scheduler hasn't yet spawned the first | ||
1063 | * non-idle task or this CPU does not need another | ||
1064 | * grace period. Either way, don't start a new grace | ||
1065 | * period. | ||
1066 | */ | ||
1067 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1068 | return; | ||
1069 | } | ||
1070 | 1186 | ||
1071 | if (rsp->fqs_active) { | 1187 | if (rcu_gp_in_progress(rsp)) { |
1072 | /* | 1188 | /* Grace period already in progress, don't start another. */ |
1073 | * This CPU needs a grace period, but force_quiescent_state() | 1189 | raw_spin_unlock_irq(&rnp->lock); |
1074 | * is running. Tell it to start one on this CPU's behalf. | 1190 | return 0; |
1075 | */ | ||
1076 | rsp->fqs_need_gp = 1; | ||
1077 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1078 | return; | ||
1079 | } | 1191 | } |
1080 | 1192 | ||
1081 | /* Advance to a new grace period and initialize state. */ | 1193 | /* Advance to a new grace period and initialize state. */ |
1082 | rsp->gpnum++; | 1194 | rsp->gpnum++; |
1083 | trace_rcu_grace_period(rsp->name, rsp->gpnum, "start"); | 1195 | trace_rcu_grace_period(rsp->name, rsp->gpnum, "start"); |
1084 | WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT); | ||
1085 | rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | ||
1086 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | ||
1087 | record_gp_stall_check_time(rsp); | 1196 | record_gp_stall_check_time(rsp); |
1088 | raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */ | 1197 | raw_spin_unlock_irq(&rnp->lock); |
1089 | 1198 | ||
1090 | /* Exclude any concurrent CPU-hotplug operations. */ | 1199 | /* Exclude any concurrent CPU-hotplug operations. */ |
1091 | raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */ | 1200 | get_online_cpus(); |
1092 | 1201 | ||
1093 | /* | 1202 | /* |
1094 | * Set the quiescent-state-needed bits in all the rcu_node | 1203 | * Set the quiescent-state-needed bits in all the rcu_node |
1095 | * structures for all currently online CPUs in breadth-first | 1204 | * structures for all currently online CPUs in breadth-first order, |
1096 | * order, starting from the root rcu_node structure. This | 1205 | * starting from the root rcu_node structure, relying on the layout |
1097 | * operation relies on the layout of the hierarchy within the | 1206 | * of the tree within the rsp->node[] array. Note that other CPUs |
1098 | * rsp->node[] array. Note that other CPUs will access only | 1207 | * will access only the leaves of the hierarchy, thus seeing that no |
1099 | * the leaves of the hierarchy, which still indicate that no | ||
1100 | * grace period is in progress, at least until the corresponding | 1208 | * grace period is in progress, at least until the corresponding |
1101 | * leaf node has been initialized. In addition, we have excluded | 1209 | * leaf node has been initialized. In addition, we have excluded |
1102 | * CPU-hotplug operations. | 1210 | * CPU-hotplug operations. |
1103 | * | 1211 | * |
1104 | * Note that the grace period cannot complete until we finish | 1212 | * The grace period cannot complete until the initialization |
1105 | * the initialization process, as there will be at least one | 1213 | * process finishes, because this kthread handles both. |
1106 | * qsmask bit set in the root node until that time, namely the | ||
1107 | * one corresponding to this CPU, due to the fact that we have | ||
1108 | * irqs disabled. | ||
1109 | */ | 1214 | */ |
1110 | rcu_for_each_node_breadth_first(rsp, rnp) { | 1215 | rcu_for_each_node_breadth_first(rsp, rnp) { |
1111 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | 1216 | raw_spin_lock_irq(&rnp->lock); |
1217 | rdp = this_cpu_ptr(rsp->rda); | ||
1112 | rcu_preempt_check_blocked_tasks(rnp); | 1218 | rcu_preempt_check_blocked_tasks(rnp); |
1113 | rnp->qsmask = rnp->qsmaskinit; | 1219 | rnp->qsmask = rnp->qsmaskinit; |
1114 | rnp->gpnum = rsp->gpnum; | 1220 | rnp->gpnum = rsp->gpnum; |
1221 | WARN_ON_ONCE(rnp->completed != rsp->completed); | ||
1115 | rnp->completed = rsp->completed; | 1222 | rnp->completed = rsp->completed; |
1116 | if (rnp == rdp->mynode) | 1223 | if (rnp == rdp->mynode) |
1117 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | 1224 | rcu_start_gp_per_cpu(rsp, rnp, rdp); |
@@ -1119,37 +1226,54 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
1119 | trace_rcu_grace_period_init(rsp->name, rnp->gpnum, | 1226 | trace_rcu_grace_period_init(rsp->name, rnp->gpnum, |
1120 | rnp->level, rnp->grplo, | 1227 | rnp->level, rnp->grplo, |
1121 | rnp->grphi, rnp->qsmask); | 1228 | rnp->grphi, rnp->qsmask); |
1122 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1229 | raw_spin_unlock_irq(&rnp->lock); |
1230 | #ifdef CONFIG_PROVE_RCU_DELAY | ||
1231 | if ((random32() % (rcu_num_nodes * 8)) == 0) | ||
1232 | schedule_timeout_uninterruptible(2); | ||
1233 | #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ | ||
1234 | cond_resched(); | ||
1123 | } | 1235 | } |
1124 | 1236 | ||
1125 | rnp = rcu_get_root(rsp); | 1237 | put_online_cpus(); |
1126 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | 1238 | return 1; |
1127 | rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ | ||
1128 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
1129 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
1130 | } | 1239 | } |
1131 | 1240 | ||
1132 | /* | 1241 | /* |
1133 | * Report a full set of quiescent states to the specified rcu_state | 1242 | * Do one round of quiescent-state forcing. |
1134 | * data structure. This involves cleaning up after the prior grace | ||
1135 | * period and letting rcu_start_gp() start up the next grace period | ||
1136 | * if one is needed. Note that the caller must hold rnp->lock, as | ||
1137 | * required by rcu_start_gp(), which will release it. | ||
1138 | */ | 1243 | */ |
1139 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) | 1244 | int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) |
1140 | __releases(rcu_get_root(rsp)->lock) | ||
1141 | { | 1245 | { |
1142 | unsigned long gp_duration; | 1246 | int fqs_state = fqs_state_in; |
1143 | struct rcu_node *rnp = rcu_get_root(rsp); | 1247 | struct rcu_node *rnp = rcu_get_root(rsp); |
1144 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); | ||
1145 | 1248 | ||
1146 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | 1249 | rsp->n_force_qs++; |
1250 | if (fqs_state == RCU_SAVE_DYNTICK) { | ||
1251 | /* Collect dyntick-idle snapshots. */ | ||
1252 | force_qs_rnp(rsp, dyntick_save_progress_counter); | ||
1253 | fqs_state = RCU_FORCE_QS; | ||
1254 | } else { | ||
1255 | /* Handle dyntick-idle and offline CPUs. */ | ||
1256 | force_qs_rnp(rsp, rcu_implicit_dynticks_qs); | ||
1257 | } | ||
1258 | /* Clear flag to prevent immediate re-entry. */ | ||
1259 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { | ||
1260 | raw_spin_lock_irq(&rnp->lock); | ||
1261 | rsp->gp_flags &= ~RCU_GP_FLAG_FQS; | ||
1262 | raw_spin_unlock_irq(&rnp->lock); | ||
1263 | } | ||
1264 | return fqs_state; | ||
1265 | } | ||
1147 | 1266 | ||
1148 | /* | 1267 | /* |
1149 | * Ensure that all grace-period and pre-grace-period activity | 1268 | * Clean up after the old grace period. |
1150 | * is seen before the assignment to rsp->completed. | 1269 | */ |
1151 | */ | 1270 | static void rcu_gp_cleanup(struct rcu_state *rsp) |
1152 | smp_mb(); /* See above block comment. */ | 1271 | { |
1272 | unsigned long gp_duration; | ||
1273 | struct rcu_data *rdp; | ||
1274 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
1275 | |||
1276 | raw_spin_lock_irq(&rnp->lock); | ||
1153 | gp_duration = jiffies - rsp->gp_start; | 1277 | gp_duration = jiffies - rsp->gp_start; |
1154 | if (gp_duration > rsp->gp_max) | 1278 | if (gp_duration > rsp->gp_max) |
1155 | rsp->gp_max = gp_duration; | 1279 | rsp->gp_max = gp_duration; |
@@ -1161,35 +1285,149 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) | |||
1161 | * they can do to advance the grace period. It is therefore | 1285 | * they can do to advance the grace period. It is therefore |
1162 | * safe for us to drop the lock in order to mark the grace | 1286 | * safe for us to drop the lock in order to mark the grace |
1163 | * period as completed in all of the rcu_node structures. | 1287 | * period as completed in all of the rcu_node structures. |
1164 | * | ||
1165 | * But if this CPU needs another grace period, it will take | ||
1166 | * care of this while initializing the next grace period. | ||
1167 | * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL | ||
1168 | * because the callbacks have not yet been advanced: Those | ||
1169 | * callbacks are waiting on the grace period that just now | ||
1170 | * completed. | ||
1171 | */ | 1288 | */ |
1172 | if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) { | 1289 | raw_spin_unlock_irq(&rnp->lock); |
1173 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
1174 | 1290 | ||
1175 | /* | 1291 | /* |
1176 | * Propagate new ->completed value to rcu_node structures | 1292 | * Propagate new ->completed value to rcu_node structures so |
1177 | * so that other CPUs don't have to wait until the start | 1293 | * that other CPUs don't have to wait until the start of the next |
1178 | * of the next grace period to process their callbacks. | 1294 | * grace period to process their callbacks. This also avoids |
1179 | */ | 1295 | * some nasty RCU grace-period initialization races by forcing |
1180 | rcu_for_each_node_breadth_first(rsp, rnp) { | 1296 | * the end of the current grace period to be completely recorded in |
1181 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | 1297 | * all of the rcu_node structures before the beginning of the next |
1182 | rnp->completed = rsp->gpnum; | 1298 | * grace period is recorded in any of the rcu_node structures. |
1183 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1299 | */ |
1184 | } | 1300 | rcu_for_each_node_breadth_first(rsp, rnp) { |
1185 | rnp = rcu_get_root(rsp); | 1301 | raw_spin_lock_irq(&rnp->lock); |
1186 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | 1302 | rnp->completed = rsp->gpnum; |
1303 | raw_spin_unlock_irq(&rnp->lock); | ||
1304 | cond_resched(); | ||
1187 | } | 1305 | } |
1306 | rnp = rcu_get_root(rsp); | ||
1307 | raw_spin_lock_irq(&rnp->lock); | ||
1188 | 1308 | ||
1189 | rsp->completed = rsp->gpnum; /* Declare the grace period complete. */ | 1309 | rsp->completed = rsp->gpnum; /* Declare grace period done. */ |
1190 | trace_rcu_grace_period(rsp->name, rsp->completed, "end"); | 1310 | trace_rcu_grace_period(rsp->name, rsp->completed, "end"); |
1191 | rsp->fqs_state = RCU_GP_IDLE; | 1311 | rsp->fqs_state = RCU_GP_IDLE; |
1192 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | 1312 | rdp = this_cpu_ptr(rsp->rda); |
1313 | if (cpu_needs_another_gp(rsp, rdp)) | ||
1314 | rsp->gp_flags = 1; | ||
1315 | raw_spin_unlock_irq(&rnp->lock); | ||
1316 | } | ||
1317 | |||
1318 | /* | ||
1319 | * Body of kthread that handles grace periods. | ||
1320 | */ | ||
1321 | static int __noreturn rcu_gp_kthread(void *arg) | ||
1322 | { | ||
1323 | int fqs_state; | ||
1324 | unsigned long j; | ||
1325 | int ret; | ||
1326 | struct rcu_state *rsp = arg; | ||
1327 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
1328 | |||
1329 | for (;;) { | ||
1330 | |||
1331 | /* Handle grace-period start. */ | ||
1332 | for (;;) { | ||
1333 | wait_event_interruptible(rsp->gp_wq, | ||
1334 | rsp->gp_flags & | ||
1335 | RCU_GP_FLAG_INIT); | ||
1336 | if ((rsp->gp_flags & RCU_GP_FLAG_INIT) && | ||
1337 | rcu_gp_init(rsp)) | ||
1338 | break; | ||
1339 | cond_resched(); | ||
1340 | flush_signals(current); | ||
1341 | } | ||
1342 | |||
1343 | /* Handle quiescent-state forcing. */ | ||
1344 | fqs_state = RCU_SAVE_DYNTICK; | ||
1345 | j = jiffies_till_first_fqs; | ||
1346 | if (j > HZ) { | ||
1347 | j = HZ; | ||
1348 | jiffies_till_first_fqs = HZ; | ||
1349 | } | ||
1350 | for (;;) { | ||
1351 | rsp->jiffies_force_qs = jiffies + j; | ||
1352 | ret = wait_event_interruptible_timeout(rsp->gp_wq, | ||
1353 | (rsp->gp_flags & RCU_GP_FLAG_FQS) || | ||
1354 | (!ACCESS_ONCE(rnp->qsmask) && | ||
1355 | !rcu_preempt_blocked_readers_cgp(rnp)), | ||
1356 | j); | ||
1357 | /* If grace period done, leave loop. */ | ||
1358 | if (!ACCESS_ONCE(rnp->qsmask) && | ||
1359 | !rcu_preempt_blocked_readers_cgp(rnp)) | ||
1360 | break; | ||
1361 | /* If time for quiescent-state forcing, do it. */ | ||
1362 | if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) { | ||
1363 | fqs_state = rcu_gp_fqs(rsp, fqs_state); | ||
1364 | cond_resched(); | ||
1365 | } else { | ||
1366 | /* Deal with stray signal. */ | ||
1367 | cond_resched(); | ||
1368 | flush_signals(current); | ||
1369 | } | ||
1370 | j = jiffies_till_next_fqs; | ||
1371 | if (j > HZ) { | ||
1372 | j = HZ; | ||
1373 | jiffies_till_next_fqs = HZ; | ||
1374 | } else if (j < 1) { | ||
1375 | j = 1; | ||
1376 | jiffies_till_next_fqs = 1; | ||
1377 | } | ||
1378 | } | ||
1379 | |||
1380 | /* Handle grace-period end. */ | ||
1381 | rcu_gp_cleanup(rsp); | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1385 | /* | ||
1386 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | ||
1387 | * in preparation for detecting the next grace period. The caller must hold | ||
1388 | * the root node's ->lock, which is released before return. Hard irqs must | ||
1389 | * be disabled. | ||
1390 | * | ||
1391 | * Note that it is legal for a dying CPU (which is marked as offline) to | ||
1392 | * invoke this function. This can happen when the dying CPU reports its | ||
1393 | * quiescent state. | ||
1394 | */ | ||
1395 | static void | ||
1396 | rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | ||
1397 | __releases(rcu_get_root(rsp)->lock) | ||
1398 | { | ||
1399 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); | ||
1400 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
1401 | |||
1402 | if (!rsp->gp_kthread || | ||
1403 | !cpu_needs_another_gp(rsp, rdp)) { | ||
1404 | /* | ||
1405 | * Either we have not yet spawned the grace-period | ||
1406 | * task or this CPU does not need another grace period. | ||
1407 | * Either way, don't start a new grace period. | ||
1408 | */ | ||
1409 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1410 | return; | ||
1411 | } | ||
1412 | |||
1413 | rsp->gp_flags = RCU_GP_FLAG_INIT; | ||
1414 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1415 | wake_up(&rsp->gp_wq); | ||
1416 | } | ||
1417 | |||
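Grace-period initialization, forcing, and cleanup now run in rcu_gp_kthread(), so rcu_start_gp() is reduced to setting RCU_GP_FLAG_INIT under the root lock and waking the kthread. The sketch below is a user-space approximation of that handshake, with a pthread condition variable standing in for the wait queue; the worker re-checks the flag under the lock, so a wakeup cannot be lost. The names gp_worker, start_gp, and gps_completed are illustrative, not kernel APIs.

/* Sketch: requester sets a flag under a lock and wakes a worker thread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define GP_FLAG_INIT 0x1

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int gp_flags;
static int gps_completed;
static bool done;

static void *gp_worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    for (;;) {
        /* Sleep until someone requests grace-period initialization. */
        while (!(gp_flags & GP_FLAG_INIT) && !done)
            pthread_cond_wait(&wq, &lock);
        if (done)
            break;
        gp_flags &= ~GP_FLAG_INIT;
        pthread_mutex_unlock(&lock);

        printf("worker: initializing a new grace period\n");
        /* ... the forcing and cleanup phases would run here ... */

        pthread_mutex_lock(&lock);
        gps_completed++;
        pthread_cond_broadcast(&wq);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void start_gp(void)
{
    /* Mirror of rcu_start_gp(): set the flag, then wake the worker. */
    pthread_mutex_lock(&lock);
    gp_flags |= GP_FLAG_INIT;
    pthread_cond_broadcast(&wq);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, gp_worker, NULL);
    start_gp();

    /* Wait until the worker has handled the request, then shut it down. */
    pthread_mutex_lock(&lock);
    while (gps_completed == 0)
        pthread_cond_wait(&wq, &lock);
    done = true;
    pthread_cond_broadcast(&wq);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    return 0;
}

The real kthread additionally loops on a timed wait for quiescent-state forcing before doing cleanup; that part is elided in the sketch.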
1418 | /* | ||
1419 | * Report a full set of quiescent states to the specified rcu_state | ||
1420 | * data structure. This involves cleaning up after the prior grace | ||
1421 | * period and letting rcu_start_gp() start up the next grace period | ||
1422 | * if one is needed. Note that the caller must hold rnp->lock, as | ||
1423 | * required by rcu_start_gp(), which will release it. | ||
1424 | */ | ||
1425 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) | ||
1426 | __releases(rcu_get_root(rsp)->lock) | ||
1427 | { | ||
1428 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | ||
1429 | raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); | ||
1430 | wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ | ||
1193 | } | 1431 | } |
1194 | 1432 | ||
1195 | /* | 1433 | /* |
@@ -1258,7 +1496,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, | |||
1258 | * based on quiescent states detected in an earlier grace period! | 1496 | * based on quiescent states detected in an earlier grace period! |
1259 | */ | 1497 | */ |
1260 | static void | 1498 | static void |
1261 | rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp) | 1499 | rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) |
1262 | { | 1500 | { |
1263 | unsigned long flags; | 1501 | unsigned long flags; |
1264 | unsigned long mask; | 1502 | unsigned long mask; |
@@ -1266,7 +1504,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las | |||
1266 | 1504 | ||
1267 | rnp = rdp->mynode; | 1505 | rnp = rdp->mynode; |
1268 | raw_spin_lock_irqsave(&rnp->lock, flags); | 1506 | raw_spin_lock_irqsave(&rnp->lock, flags); |
1269 | if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) { | 1507 | if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || |
1508 | rnp->completed == rnp->gpnum) { | ||
1270 | 1509 | ||
1271 | /* | 1510 | /* |
1272 | * The grace period in which this quiescent state was | 1511 | * The grace period in which this quiescent state was |
@@ -1325,7 +1564,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1325 | * Tell RCU we are done (but rcu_report_qs_rdp() will be the | 1564 | * Tell RCU we are done (but rcu_report_qs_rdp() will be the |
1326 | * judge of that). | 1565 | * judge of that). |
1327 | */ | 1566 | */ |
1328 | rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum); | 1567 | rcu_report_qs_rdp(rdp->cpu, rsp, rdp); |
1329 | } | 1568 | } |
1330 | 1569 | ||
1331 | #ifdef CONFIG_HOTPLUG_CPU | 1570 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -1390,17 +1629,6 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
1390 | int i; | 1629 | int i; |
1391 | struct rcu_data *rdp = __this_cpu_ptr(rsp->rda); | 1630 | struct rcu_data *rdp = __this_cpu_ptr(rsp->rda); |
1392 | 1631 | ||
1393 | /* | ||
1394 | * If there is an rcu_barrier() operation in progress, then | ||
1395 | * only the task doing that operation is permitted to adopt | ||
1396 | * callbacks. To do otherwise breaks rcu_barrier() and friends | ||
1397 | * by causing them to fail to wait for the callbacks in the | ||
1398 | * orphanage. | ||
1399 | */ | ||
1400 | if (rsp->rcu_barrier_in_progress && | ||
1401 | rsp->rcu_barrier_in_progress != current) | ||
1402 | return; | ||
1403 | |||
1404 | /* Do the accounting first. */ | 1632 | /* Do the accounting first. */ |
1405 | rdp->qlen_lazy += rsp->qlen_lazy; | 1633 | rdp->qlen_lazy += rsp->qlen_lazy; |
1406 | rdp->qlen += rsp->qlen; | 1634 | rdp->qlen += rsp->qlen; |
@@ -1455,9 +1683,8 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) | |||
1455 | * The CPU has been completely removed, and some other CPU is reporting | 1683 | * The CPU has been completely removed, and some other CPU is reporting |
1456 | * this fact from process context. Do the remainder of the cleanup, | 1684 | * this fact from process context. Do the remainder of the cleanup, |
1457 | * including orphaning the outgoing CPU's RCU callbacks, and also | 1685 | * including orphaning the outgoing CPU's RCU callbacks, and also |
1458 | * adopting them, if there is no _rcu_barrier() instance running. | 1686 | * adopting them. There can only be one CPU hotplug operation at a time, |
1459 | * There can only be one CPU hotplug operation at a time, so no other | 1687 | * so no other CPU can be attempting to update rcu_cpu_kthread_task. |
1460 | * CPU can be attempting to update rcu_cpu_kthread_task. | ||
1461 | */ | 1688 | */ |
1462 | static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) | 1689 | static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) |
1463 | { | 1690 | { |
@@ -1468,8 +1695,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) | |||
1468 | struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ | 1695 | struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ |
1469 | 1696 | ||
1470 | /* Adjust any no-longer-needed kthreads. */ | 1697 | /* Adjust any no-longer-needed kthreads. */ |
1471 | rcu_stop_cpu_kthread(cpu); | 1698 | rcu_boost_kthread_setaffinity(rnp, -1); |
1472 | rcu_node_kthread_setaffinity(rnp, -1); | ||
1473 | 1699 | ||
1474 | /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */ | 1700 | /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */ |
1475 | 1701 | ||
@@ -1515,14 +1741,13 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) | |||
1515 | WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, | 1741 | WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, |
1516 | "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", | 1742 | "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", |
1517 | cpu, rdp->qlen, rdp->nxtlist); | 1743 | cpu, rdp->qlen, rdp->nxtlist); |
1744 | init_callback_list(rdp); | ||
1745 | /* Disallow further callbacks on this CPU. */ | ||
1746 | rdp->nxttail[RCU_NEXT_TAIL] = NULL; | ||
1518 | } | 1747 | } |
1519 | 1748 | ||
1520 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | 1749 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
1521 | 1750 | ||
1522 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
1523 | { | ||
1524 | } | ||
1525 | |||
1526 | static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) | 1751 | static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) |
1527 | { | 1752 | { |
1528 | } | 1753 | } |
@@ -1687,6 +1912,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
1687 | struct rcu_node *rnp; | 1912 | struct rcu_node *rnp; |
1688 | 1913 | ||
1689 | rcu_for_each_leaf_node(rsp, rnp) { | 1914 | rcu_for_each_leaf_node(rsp, rnp) { |
1915 | cond_resched(); | ||
1690 | mask = 0; | 1916 | mask = 0; |
1691 | raw_spin_lock_irqsave(&rnp->lock, flags); | 1917 | raw_spin_lock_irqsave(&rnp->lock, flags); |
1692 | if (!rcu_gp_in_progress(rsp)) { | 1918 | if (!rcu_gp_in_progress(rsp)) { |
@@ -1723,72 +1949,39 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
1723 | * Force quiescent states on reluctant CPUs, and also detect which | 1949 | * Force quiescent states on reluctant CPUs, and also detect which |
1724 | * CPUs are in dyntick-idle mode. | 1950 | * CPUs are in dyntick-idle mode. |
1725 | */ | 1951 | */ |
1726 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | 1952 | static void force_quiescent_state(struct rcu_state *rsp) |
1727 | { | 1953 | { |
1728 | unsigned long flags; | 1954 | unsigned long flags; |
1729 | struct rcu_node *rnp = rcu_get_root(rsp); | 1955 | bool ret; |
1730 | 1956 | struct rcu_node *rnp; | |
1731 | trace_rcu_utilization("Start fqs"); | 1957 | struct rcu_node *rnp_old = NULL; |
1732 | if (!rcu_gp_in_progress(rsp)) { | 1958 | |
1733 | trace_rcu_utilization("End fqs"); | 1959 | /* Funnel through hierarchy to reduce memory contention. */ |
1734 | return; /* No grace period in progress, nothing to force. */ | 1960 | rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode; |
1735 | } | 1961 | for (; rnp != NULL; rnp = rnp->parent) { |
1736 | if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) { | 1962 | ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || |
1737 | rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ | 1963 | !raw_spin_trylock(&rnp->fqslock); |
1738 | trace_rcu_utilization("End fqs"); | 1964 | if (rnp_old != NULL) |
1739 | return; /* Someone else is already on the job. */ | 1965 | raw_spin_unlock(&rnp_old->fqslock); |
1740 | } | 1966 | if (ret) { |
1741 | if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies)) | 1967 | rsp->n_force_qs_lh++; |
1742 | goto unlock_fqs_ret; /* no emergency and done recently. */ | 1968 | return; |
1743 | rsp->n_force_qs++; | 1969 | } |
1744 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | 1970 | rnp_old = rnp; |
1745 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | ||
1746 | if(!rcu_gp_in_progress(rsp)) { | ||
1747 | rsp->n_force_qs_ngp++; | ||
1748 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
1749 | goto unlock_fqs_ret; /* no GP in progress, time updated. */ | ||
1750 | } | ||
1751 | rsp->fqs_active = 1; | ||
1752 | switch (rsp->fqs_state) { | ||
1753 | case RCU_GP_IDLE: | ||
1754 | case RCU_GP_INIT: | ||
1755 | |||
1756 | break; /* grace period idle or initializing, ignore. */ | ||
1757 | |||
1758 | case RCU_SAVE_DYNTICK: | ||
1759 | |||
1760 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
1761 | |||
1762 | /* Record dyntick-idle state. */ | ||
1763 | force_qs_rnp(rsp, dyntick_save_progress_counter); | ||
1764 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | ||
1765 | if (rcu_gp_in_progress(rsp)) | ||
1766 | rsp->fqs_state = RCU_FORCE_QS; | ||
1767 | break; | ||
1768 | |||
1769 | case RCU_FORCE_QS: | ||
1770 | |||
1771 | /* Check dyntick-idle state, send IPI to laggarts. */ | ||
1772 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
1773 | force_qs_rnp(rsp, rcu_implicit_dynticks_qs); | ||
1774 | |||
1775 | /* Leave state in case more forcing is required. */ | ||
1776 | |||
1777 | raw_spin_lock(&rnp->lock); /* irqs already disabled */ | ||
1778 | break; | ||
1779 | } | 1971 | } |
1780 | rsp->fqs_active = 0; | 1972 | /* rnp_old == rcu_get_root(rsp), rnp == NULL. */ |
1781 | if (rsp->fqs_need_gp) { | 1973 | |
1782 | raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */ | 1974 | /* Reached the root of the rcu_node tree, acquire lock. */ |
1783 | rsp->fqs_need_gp = 0; | 1975 | raw_spin_lock_irqsave(&rnp_old->lock, flags); |
1784 | rcu_start_gp(rsp, flags); /* releases rnp->lock */ | 1976 | raw_spin_unlock(&rnp_old->fqslock); |
1785 | trace_rcu_utilization("End fqs"); | 1977 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { |
1786 | return; | 1978 | rsp->n_force_qs_lh++; |
1979 | raw_spin_unlock_irqrestore(&rnp_old->lock, flags); | ||
1980 | return; /* Someone beat us to it. */ | ||
1787 | } | 1981 | } |
1788 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ | 1982 | rsp->gp_flags |= RCU_GP_FLAG_FQS; |
1789 | unlock_fqs_ret: | 1983 | raw_spin_unlock_irqrestore(&rnp_old->lock, flags); |
1790 | raw_spin_unlock_irqrestore(&rsp->fqslock, flags); | 1984 | wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ |
1791 | trace_rcu_utilization("End fqs"); | ||
1792 | } | 1985 | } |
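force_quiescent_state() now uses funnel locking on the new per-rcu_node ->fqslock fields instead of one global lock: a CPU walks from its leaf toward the root, trylocking one level while releasing the one below it, and backs off as soon as a trylock fails or RCU_GP_FLAG_FQS is already set, since either way some other CPU is already propagating the same request. Here is a compilable user-space sketch of just that locking shape, with a two-level tree and illustrative names; it is not the kernel implementation.

/* Sketch: funnel from a leaf to the root, holding at most one trylock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_FQS 0x2

struct node {
    pthread_mutex_t fqslock;
    struct node *parent;
};

static struct node root = { .fqslock = PTHREAD_MUTEX_INITIALIZER, .parent = NULL };
static struct node leaf = { .fqslock = PTHREAD_MUTEX_INITIALIZER, .parent = &root };

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int gp_flags;

static void request_fqs(struct node *start)
{
    struct node *np, *np_old = NULL;
    bool lose;

    /* Funnel toward the root, holding at most one fqslock at a time. */
    for (np = start; np != NULL; np = np->parent) {
        lose = (gp_flags & FLAG_FQS) ||
               pthread_mutex_trylock(&np->fqslock) != 0;
        if (np_old != NULL)
            pthread_mutex_unlock(&np_old->fqslock);
        if (lose)
            return;             /* someone else is already on the job */
        np_old = np;
    }

    /* We hold the root's fqslock: record the request, then drop both locks. */
    pthread_mutex_lock(&root_lock);
    pthread_mutex_unlock(&np_old->fqslock);
    if (!(gp_flags & FLAG_FQS))
        gp_flags |= FLAG_FQS;   /* the kernel wakes the GP kthread here */
    pthread_mutex_unlock(&root_lock);
}

int main(void)
{
    request_fqs(&leaf);
    printf("gp_flags = %#x\n", gp_flags);
    return 0;
}

Only the winner reaches the root, records the request under the root's ->lock, and, in the kernel, wakes the grace-period kthread; every loser has done nothing more than one trylock per level.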
1793 | 1986 | ||
1794 | /* | 1987 | /* |
@@ -1805,13 +1998,6 @@ __rcu_process_callbacks(struct rcu_state *rsp) | |||
1805 | WARN_ON_ONCE(rdp->beenonline == 0); | 1998 | WARN_ON_ONCE(rdp->beenonline == 0); |
1806 | 1999 | ||
1807 | /* | 2000 | /* |
1808 | * If an RCU GP has gone long enough, go check for dyntick | ||
1809 | * idle CPUs and, if needed, send resched IPIs. | ||
1810 | */ | ||
1811 | if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) | ||
1812 | force_quiescent_state(rsp, 1); | ||
1813 | |||
1814 | /* | ||
1815 | * Advance callbacks in response to end of earlier grace | 2001 | * Advance callbacks in response to end of earlier grace |
1816 | * period that some other CPU ended. | 2002 | * period that some other CPU ended. |
1817 | */ | 2003 | */ |
@@ -1838,6 +2024,8 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
1838 | { | 2024 | { |
1839 | struct rcu_state *rsp; | 2025 | struct rcu_state *rsp; |
1840 | 2026 | ||
2027 | if (cpu_is_offline(smp_processor_id())) | ||
2028 | return; | ||
1841 | trace_rcu_utilization("Start RCU core"); | 2029 | trace_rcu_utilization("Start RCU core"); |
1842 | for_each_rcu_flavor(rsp) | 2030 | for_each_rcu_flavor(rsp) |
1843 | __rcu_process_callbacks(rsp); | 2031 | __rcu_process_callbacks(rsp); |
@@ -1909,12 +2097,11 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, | |||
1909 | rdp->blimit = LONG_MAX; | 2097 | rdp->blimit = LONG_MAX; |
1910 | if (rsp->n_force_qs == rdp->n_force_qs_snap && | 2098 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1911 | *rdp->nxttail[RCU_DONE_TAIL] != head) | 2099 | *rdp->nxttail[RCU_DONE_TAIL] != head) |
1912 | force_quiescent_state(rsp, 0); | 2100 | force_quiescent_state(rsp); |
1913 | rdp->n_force_qs_snap = rsp->n_force_qs; | 2101 | rdp->n_force_qs_snap = rsp->n_force_qs; |
1914 | rdp->qlen_last_fqs_check = rdp->qlen; | 2102 | rdp->qlen_last_fqs_check = rdp->qlen; |
1915 | } | 2103 | } |
1916 | } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) | 2104 | } |
1917 | force_quiescent_state(rsp, 1); | ||
1918 | } | 2105 | } |
1919 | 2106 | ||
1920 | static void | 2107 | static void |
@@ -1929,8 +2116,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1929 | head->func = func; | 2116 | head->func = func; |
1930 | head->next = NULL; | 2117 | head->next = NULL; |
1931 | 2118 | ||
1932 | smp_mb(); /* Ensure RCU update seen before callback registry. */ | ||
1933 | |||
1934 | /* | 2119 | /* |
1935 | * Opportunistically note grace-period endings and beginnings. | 2120 | * Opportunistically note grace-period endings and beginnings. |
1936 | * Note that we might see a beginning right after we see an | 2121 | * Note that we might see a beginning right after we see an |
@@ -1941,6 +2126,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1941 | rdp = this_cpu_ptr(rsp->rda); | 2126 | rdp = this_cpu_ptr(rsp->rda); |
1942 | 2127 | ||
1943 | /* Add the callback to our list. */ | 2128 | /* Add the callback to our list. */ |
2129 | if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL)) { | ||
2130 | /* _call_rcu() is illegal on offline CPU; leak the callback. */ | ||
2131 | WARN_ON_ONCE(1); | ||
2132 | local_irq_restore(flags); | ||
2133 | return; | ||
2134 | } | ||
1944 | ACCESS_ONCE(rdp->qlen)++; | 2135 | ACCESS_ONCE(rdp->qlen)++; |
1945 | if (lazy) | 2136 | if (lazy) |
1946 | rdp->qlen_lazy++; | 2137 | rdp->qlen_lazy++; |
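The new check in __call_rcu() pairs with the change to rcu_cleanup_dead_cpu() above: once a CPU is fully dead its RCU_NEXT_TAIL pointer is set to NULL, and a later attempt to queue a callback there is caught, warned about, and deliberately leaked rather than enqueued on an offline CPU. A small user-space sketch of that sentinel-tail convention follows, using a plain singly linked list in place of the real segmented callback list; the names are illustrative.

/* Sketch: a NULL tail pointer means "this queue no longer accepts work". */
#include <stdio.h>
#include <stddef.h>

struct cb {
    struct cb *next;
    void (*func)(struct cb *);
};

static struct cb *list;
static struct cb **tail = &list;    /* NULL here means "no more callbacks" */

static void disallow_callbacks(void)
{
    tail = NULL;                    /* what rcu_cleanup_dead_cpu() now does */
}

static int queue_cb(struct cb *cb, void (*func)(struct cb *))
{
    if (tail == NULL) {
        /* Queueing on a dead CPU: warn and leak, as __call_rcu() does. */
        fprintf(stderr, "callback on dead CPU ignored\n");
        return -1;
    }
    cb->func = func;
    cb->next = NULL;
    *tail = cb;
    tail = &cb->next;
    return 0;
}

static void show(struct cb *cb) { (void)cb; puts("callback would run"); }

int main(void)
{
    static struct cb a, b;

    queue_cb(&a, show);             /* accepted */
    disallow_callbacks();
    queue_cb(&b, show);             /* rejected and leaked */
    return 0;
}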
@@ -2195,17 +2386,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
2195 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | 2386 | /* Is the RCU core waiting for a quiescent state from this CPU? */ |
2196 | if (rcu_scheduler_fully_active && | 2387 | if (rcu_scheduler_fully_active && |
2197 | rdp->qs_pending && !rdp->passed_quiesce) { | 2388 | rdp->qs_pending && !rdp->passed_quiesce) { |
2198 | |||
2199 | /* | ||
2200 | * If force_quiescent_state() coming soon and this CPU | ||
2201 | * needs a quiescent state, and this is either RCU-sched | ||
2202 | * or RCU-bh, force a local reschedule. | ||
2203 | */ | ||
2204 | rdp->n_rp_qs_pending++; | 2389 | rdp->n_rp_qs_pending++; |
2205 | if (!rdp->preemptible && | ||
2206 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, | ||
2207 | jiffies)) | ||
2208 | set_need_resched(); | ||
2209 | } else if (rdp->qs_pending && rdp->passed_quiesce) { | 2390 | } else if (rdp->qs_pending && rdp->passed_quiesce) { |
2210 | rdp->n_rp_report_qs++; | 2391 | rdp->n_rp_report_qs++; |
2211 | return 1; | 2392 | return 1; |
@@ -2235,13 +2416,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
2235 | return 1; | 2416 | return 1; |
2236 | } | 2417 | } |
2237 | 2418 | ||
2238 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ | ||
2239 | if (rcu_gp_in_progress(rsp) && | ||
2240 | ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) { | ||
2241 | rdp->n_rp_need_fqs++; | ||
2242 | return 1; | ||
2243 | } | ||
2244 | |||
2245 | /* nothing to do */ | 2419 | /* nothing to do */ |
2246 | rdp->n_rp_need_nothing++; | 2420 | rdp->n_rp_need_nothing++; |
2247 | return 0; | 2421 | return 0; |
@@ -2326,13 +2500,10 @@ static void rcu_barrier_func(void *type) | |||
2326 | static void _rcu_barrier(struct rcu_state *rsp) | 2500 | static void _rcu_barrier(struct rcu_state *rsp) |
2327 | { | 2501 | { |
2328 | int cpu; | 2502 | int cpu; |
2329 | unsigned long flags; | ||
2330 | struct rcu_data *rdp; | 2503 | struct rcu_data *rdp; |
2331 | struct rcu_data rd; | ||
2332 | unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done); | 2504 | unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done); |
2333 | unsigned long snap_done; | 2505 | unsigned long snap_done; |
2334 | 2506 | ||
2335 | init_rcu_head_on_stack(&rd.barrier_head); | ||
2336 | _rcu_barrier_trace(rsp, "Begin", -1, snap); | 2507 | _rcu_barrier_trace(rsp, "Begin", -1, snap); |
2337 | 2508 | ||
2338 | /* Take mutex to serialize concurrent rcu_barrier() requests. */ | 2509 | /* Take mutex to serialize concurrent rcu_barrier() requests. */ |
@@ -2372,70 +2543,30 @@ static void _rcu_barrier(struct rcu_state *rsp) | |||
2372 | /* | 2543 | /* |
2373 | * Initialize the count to one rather than to zero in order to | 2544 | * Initialize the count to one rather than to zero in order to |
2374 | * avoid a too-soon return to zero in case of a short grace period | 2545 | * avoid a too-soon return to zero in case of a short grace period |
2375 | * (or preemption of this task). Also flag this task as doing | 2546 | * (or preemption of this task). Exclude CPU-hotplug operations |
2376 | * an rcu_barrier(). This will prevent anyone else from adopting | 2547 | * to ensure that no offline CPU has callbacks queued. |
2377 | * orphaned callbacks, which could cause otherwise failure if a | ||
2378 | * CPU went offline and quickly came back online. To see this, | ||
2379 | * consider the following sequence of events: | ||
2380 | * | ||
2381 | * 1. We cause CPU 0 to post an rcu_barrier_callback() callback. | ||
2382 | * 2. CPU 1 goes offline, orphaning its callbacks. | ||
2383 | * 3. CPU 0 adopts CPU 1's orphaned callbacks. | ||
2384 | * 4. CPU 1 comes back online. | ||
2385 | * 5. We cause CPU 1 to post an rcu_barrier_callback() callback. | ||
2386 | * 6. Both rcu_barrier_callback() callbacks are invoked, awakening | ||
2387 | * us -- but before CPU 1's orphaned callbacks are invoked!!! | ||
2388 | */ | 2548 | */ |
2389 | init_completion(&rsp->barrier_completion); | 2549 | init_completion(&rsp->barrier_completion); |
2390 | atomic_set(&rsp->barrier_cpu_count, 1); | 2550 | atomic_set(&rsp->barrier_cpu_count, 1); |
2391 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 2551 | get_online_cpus(); |
2392 | rsp->rcu_barrier_in_progress = current; | ||
2393 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
2394 | 2552 | ||
2395 | /* | 2553 | /* |
2396 | * Force every CPU with callbacks to register a new callback | 2554 | * Force each CPU with callbacks to register a new callback. |
2397 | * that will tell us when all the preceding callbacks have | 2555 | * When that callback is invoked, we will know that all of the |
2398 | * been invoked. If an offline CPU has callbacks, wait for | 2556 | * corresponding CPU's preceding callbacks have been invoked. |
2399 | * it to either come back online or to finish orphaning those | ||
2400 | * callbacks. | ||
2401 | */ | 2557 | */ |
2402 | for_each_possible_cpu(cpu) { | 2558 | for_each_online_cpu(cpu) { |
2403 | preempt_disable(); | ||
2404 | rdp = per_cpu_ptr(rsp->rda, cpu); | 2559 | rdp = per_cpu_ptr(rsp->rda, cpu); |
2405 | if (cpu_is_offline(cpu)) { | 2560 | if (ACCESS_ONCE(rdp->qlen)) { |
2406 | _rcu_barrier_trace(rsp, "Offline", cpu, | ||
2407 | rsp->n_barrier_done); | ||
2408 | preempt_enable(); | ||
2409 | while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen)) | ||
2410 | schedule_timeout_interruptible(1); | ||
2411 | } else if (ACCESS_ONCE(rdp->qlen)) { | ||
2412 | _rcu_barrier_trace(rsp, "OnlineQ", cpu, | 2561 | _rcu_barrier_trace(rsp, "OnlineQ", cpu, |
2413 | rsp->n_barrier_done); | 2562 | rsp->n_barrier_done); |
2414 | smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); | 2563 | smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); |
2415 | preempt_enable(); | ||
2416 | } else { | 2564 | } else { |
2417 | _rcu_barrier_trace(rsp, "OnlineNQ", cpu, | 2565 | _rcu_barrier_trace(rsp, "OnlineNQ", cpu, |
2418 | rsp->n_barrier_done); | 2566 | rsp->n_barrier_done); |
2419 | preempt_enable(); | ||
2420 | } | 2567 | } |
2421 | } | 2568 | } |
2422 | 2569 | put_online_cpus(); | |
2423 | /* | ||
2424 | * Now that all online CPUs have rcu_barrier_callback() callbacks | ||
2425 | * posted, we can adopt all of the orphaned callbacks and place | ||
2426 | * an rcu_barrier_callback() callback after them. When that is done, | ||
2427 | * we are guaranteed to have an rcu_barrier_callback() callback | ||
2428 | * following every callback that could possibly have been | ||
2429 | * registered before _rcu_barrier() was called. | ||
2430 | */ | ||
2431 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | ||
2432 | rcu_adopt_orphan_cbs(rsp); | ||
2433 | rsp->rcu_barrier_in_progress = NULL; | ||
2434 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
2435 | atomic_inc(&rsp->barrier_cpu_count); | ||
2436 | smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */ | ||
2437 | rd.rsp = rsp; | ||
2438 | rsp->call(&rd.barrier_head, rcu_barrier_callback); | ||
2439 | 2570 | ||
2440 | /* | 2571 | /* |
2441 | * Now that we have an rcu_barrier_callback() callback on each | 2572 | * Now that we have an rcu_barrier_callback() callback on each |
@@ -2456,8 +2587,6 @@ static void _rcu_barrier(struct rcu_state *rsp) | |||
2456 | 2587 | ||
2457 | /* Other rcu_barrier() invocations can now safely proceed. */ | 2588 | /* Other rcu_barrier() invocations can now safely proceed. */ |
2458 | mutex_unlock(&rsp->barrier_mutex); | 2589 | mutex_unlock(&rsp->barrier_mutex); |
2459 | |||
2460 | destroy_rcu_head_on_stack(&rd.barrier_head); | ||
2461 | } | 2590 | } |
2462 | 2591 | ||
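_rcu_barrier() now excludes CPU hotplug with get_online_cpus() instead of adopting orphaned callbacks itself, while keeping its counting scheme: barrier_cpu_count starts at one so the barrier cannot complete while callbacks are still being posted, each CPU with queued callbacks contributes one reference that its rcu_barrier_callback() drops, and the initiator drops the initial reference once posting is done and waits for zero (the final decrement and wait fall outside the hunks shown here). Below is a user-space sketch of that counting pattern, with threads standing in for CPUs and a condition variable standing in for the completion; the names are illustrative.

/* Sketch: a reference count seeded at 1 so completion cannot fire early. */
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t barrier_done = PTHREAD_COND_INITIALIZER;
static int barrier_cpu_count;

static void barrier_callback(void)
{
    /* Runs after all earlier callbacks on this "CPU" have been invoked. */
    pthread_mutex_lock(&lock);
    if (--barrier_cpu_count == 0)
        pthread_cond_broadcast(&barrier_done);
    pthread_mutex_unlock(&lock);
}

static void *cpu_thread(void *arg)
{
    (void)arg;
    /* ... earlier callbacks for this CPU would be invoked here ... */
    barrier_callback();
    return NULL;
}

int main(void)
{
    pthread_t t[NCPUS];
    int i;

    barrier_cpu_count = 1;              /* self-reference */
    for (i = 0; i < NCPUS; i++) {
        pthread_mutex_lock(&lock);
        barrier_cpu_count++;            /* one reference per posted callback */
        pthread_mutex_unlock(&lock);
        pthread_create(&t[i], NULL, cpu_thread, NULL);
    }

    /* Drop the self-reference and wait for every callback to report in. */
    barrier_callback();
    pthread_mutex_lock(&lock);
    while (barrier_cpu_count != 0)
        pthread_cond_wait(&barrier_done, &lock);
    pthread_mutex_unlock(&lock);

    for (i = 0; i < NCPUS; i++)
        pthread_join(t[i], NULL);
    printf("barrier complete\n");
    return 0;
}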
2463 | /** | 2592 | /** |
@@ -2497,6 +2626,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
2497 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | 2626 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); |
2498 | WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); | 2627 | WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); |
2499 | WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); | 2628 | WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); |
2629 | #ifdef CONFIG_RCU_USER_QS | ||
2630 | WARN_ON_ONCE(rdp->dynticks->in_user); | ||
2631 | #endif | ||
2500 | rdp->cpu = cpu; | 2632 | rdp->cpu = cpu; |
2501 | rdp->rsp = rsp; | 2633 | rdp->rsp = rsp; |
2502 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 2634 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -2523,6 +2655,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) | |||
2523 | rdp->qlen_last_fqs_check = 0; | 2655 | rdp->qlen_last_fqs_check = 0; |
2524 | rdp->n_force_qs_snap = rsp->n_force_qs; | 2656 | rdp->n_force_qs_snap = rsp->n_force_qs; |
2525 | rdp->blimit = blimit; | 2657 | rdp->blimit = blimit; |
2658 | init_callback_list(rdp); /* Re-enable callbacks on this CPU. */ | ||
2526 | rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; | 2659 | rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; |
2527 | atomic_set(&rdp->dynticks->dynticks, | 2660 | atomic_set(&rdp->dynticks->dynticks, |
2528 | (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); | 2661 | (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); |
@@ -2555,7 +2688,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) | |||
2555 | rdp->completed = rnp->completed; | 2688 | rdp->completed = rnp->completed; |
2556 | rdp->passed_quiesce = 0; | 2689 | rdp->passed_quiesce = 0; |
2557 | rdp->qs_pending = 0; | 2690 | rdp->qs_pending = 0; |
2558 | rdp->passed_quiesce_gpnum = rnp->gpnum - 1; | ||
2559 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl"); | 2691 | trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl"); |
2560 | } | 2692 | } |
2561 | raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ | 2693 | raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ |
@@ -2594,12 +2726,10 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
2594 | break; | 2726 | break; |
2595 | case CPU_ONLINE: | 2727 | case CPU_ONLINE: |
2596 | case CPU_DOWN_FAILED: | 2728 | case CPU_DOWN_FAILED: |
2597 | rcu_node_kthread_setaffinity(rnp, -1); | 2729 | rcu_boost_kthread_setaffinity(rnp, -1); |
2598 | rcu_cpu_kthread_setrt(cpu, 1); | ||
2599 | break; | 2730 | break; |
2600 | case CPU_DOWN_PREPARE: | 2731 | case CPU_DOWN_PREPARE: |
2601 | rcu_node_kthread_setaffinity(rnp, cpu); | 2732 | rcu_boost_kthread_setaffinity(rnp, cpu); |
2602 | rcu_cpu_kthread_setrt(cpu, 0); | ||
2603 | break; | 2733 | break; |
2604 | case CPU_DYING: | 2734 | case CPU_DYING: |
2605 | case CPU_DYING_FROZEN: | 2735 | case CPU_DYING_FROZEN: |
@@ -2627,6 +2757,28 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
2627 | } | 2757 | } |
2628 | 2758 | ||
2629 | /* | 2759 | /* |
2760 | * Spawn the kthread that handles this RCU flavor's grace periods. | ||
2761 | */ | ||
2762 | static int __init rcu_spawn_gp_kthread(void) | ||
2763 | { | ||
2764 | unsigned long flags; | ||
2765 | struct rcu_node *rnp; | ||
2766 | struct rcu_state *rsp; | ||
2767 | struct task_struct *t; | ||
2768 | |||
2769 | for_each_rcu_flavor(rsp) { | ||
2770 | t = kthread_run(rcu_gp_kthread, rsp, rsp->name); | ||
2771 | BUG_ON(IS_ERR(t)); | ||
2772 | rnp = rcu_get_root(rsp); | ||
2773 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
2774 | rsp->gp_kthread = t; | ||
2775 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
2776 | } | ||
2777 | return 0; | ||
2778 | } | ||
2779 | early_initcall(rcu_spawn_gp_kthread); | ||
2780 | |||
2781 | /* | ||
2630 | * This function is invoked towards the end of the scheduler's initialization | 2782 | * This function is invoked towards the end of the scheduler's initialization |
2631 | * process. Before this is called, the idle task might contain | 2783 | * process. Before this is called, the idle task might contain |
2632 | * RCU read-side critical sections (during which time, this idle | 2784 | * RCU read-side critical sections (during which time, this idle |
@@ -2661,7 +2813,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) | |||
2661 | int cprv; | 2813 | int cprv; |
2662 | int i; | 2814 | int i; |
2663 | 2815 | ||
2664 | cprv = NR_CPUS; | 2816 | cprv = nr_cpu_ids; |
2665 | for (i = rcu_num_lvls - 1; i >= 0; i--) { | 2817 | for (i = rcu_num_lvls - 1; i >= 0; i--) { |
2666 | ccur = rsp->levelcnt[i]; | 2818 | ccur = rsp->levelcnt[i]; |
2667 | rsp->levelspread[i] = (cprv + ccur - 1) / ccur; | 2819 | rsp->levelspread[i] = (cprv + ccur - 1) / ccur; |
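The level-spread loop above is ceiling division run bottom-up: each level's nodes split the units of the level below (CPUs at the leaves, now counted by nr_cpu_ids rather than NR_CPUS) as evenly as possible. A tiny standalone example with made-up counts, purely for illustration:

/* Worked example of the levelspread computation for a hypothetical tree. */
#include <stdio.h>

int main(void)
{
    int levelcnt[] = { 1, 2 };      /* one root node, two leaf nodes */
    int levelspread[2];
    int nr_cpu_ids = 6;             /* was NR_CPUS before this change */
    int cprv = nr_cpu_ids, ccur, i;

    for (i = 1; i >= 0; i--) {
        ccur = levelcnt[i];
        levelspread[i] = (cprv + ccur - 1) / ccur;  /* ceiling divide */
        cprv = ccur;
    }
    /* Leaves fan out over (6+2-1)/2 = 3 CPUs each; the root over 2 leaves. */
    printf("levelspread = { %d, %d }\n", levelspread[0], levelspread[1]);
    return 0;
}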
@@ -2676,10 +2828,14 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) | |||
2676 | static void __init rcu_init_one(struct rcu_state *rsp, | 2828 | static void __init rcu_init_one(struct rcu_state *rsp, |
2677 | struct rcu_data __percpu *rda) | 2829 | struct rcu_data __percpu *rda) |
2678 | { | 2830 | { |
2679 | static char *buf[] = { "rcu_node_level_0", | 2831 | static char *buf[] = { "rcu_node_0", |
2680 | "rcu_node_level_1", | 2832 | "rcu_node_1", |
2681 | "rcu_node_level_2", | 2833 | "rcu_node_2", |
2682 | "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */ | 2834 | "rcu_node_3" }; /* Match MAX_RCU_LVLS */ |
2835 | static char *fqs[] = { "rcu_node_fqs_0", | ||
2836 | "rcu_node_fqs_1", | ||
2837 | "rcu_node_fqs_2", | ||
2838 | "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ | ||
2683 | int cpustride = 1; | 2839 | int cpustride = 1; |
2684 | int i; | 2840 | int i; |
2685 | int j; | 2841 | int j; |
@@ -2704,7 +2860,11 @@ static void __init rcu_init_one(struct rcu_state *rsp, | |||
2704 | raw_spin_lock_init(&rnp->lock); | 2860 | raw_spin_lock_init(&rnp->lock); |
2705 | lockdep_set_class_and_name(&rnp->lock, | 2861 | lockdep_set_class_and_name(&rnp->lock, |
2706 | &rcu_node_class[i], buf[i]); | 2862 | &rcu_node_class[i], buf[i]); |
2707 | rnp->gpnum = 0; | 2863 | raw_spin_lock_init(&rnp->fqslock); |
2864 | lockdep_set_class_and_name(&rnp->fqslock, | ||
2865 | &rcu_fqs_class[i], fqs[i]); | ||
2866 | rnp->gpnum = rsp->gpnum; | ||
2867 | rnp->completed = rsp->completed; | ||
2708 | rnp->qsmask = 0; | 2868 | rnp->qsmask = 0; |
2709 | rnp->qsmaskinit = 0; | 2869 | rnp->qsmaskinit = 0; |
2710 | rnp->grplo = j * cpustride; | 2870 | rnp->grplo = j * cpustride; |
@@ -2727,6 +2887,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, | |||
2727 | } | 2887 | } |
2728 | 2888 | ||
2729 | rsp->rda = rda; | 2889 | rsp->rda = rda; |
2890 | init_waitqueue_head(&rsp->gp_wq); | ||
2730 | rnp = rsp->level[rcu_num_lvls - 1]; | 2891 | rnp = rsp->level[rcu_num_lvls - 1]; |
2731 | for_each_possible_cpu(i) { | 2892 | for_each_possible_cpu(i) { |
2732 | while (i > rnp->grphi) | 2893 | while (i > rnp->grphi) |
@@ -2750,7 +2911,8 @@ static void __init rcu_init_geometry(void) | |||
2750 | int rcu_capacity[MAX_RCU_LVLS + 1]; | 2911 | int rcu_capacity[MAX_RCU_LVLS + 1]; |
2751 | 2912 | ||
2752 | /* If the compile-time values are accurate, just leave. */ | 2913 | /* If the compile-time values are accurate, just leave. */ |
2753 | if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF) | 2914 | if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF && |
2915 | nr_cpu_ids == NR_CPUS) | ||
2754 | return; | 2916 | return; |
2755 | 2917 | ||
2756 | /* | 2918 | /* |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 4d29169f2124..5faf05d68326 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -102,6 +102,10 @@ struct rcu_dynticks { | |||
102 | /* idle-period nonlazy_posted snapshot. */ | 102 | /* idle-period nonlazy_posted snapshot. */ |
103 | int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ | 103 | int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ |
104 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | 104 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ |
105 | #ifdef CONFIG_RCU_USER_QS | ||
106 | bool ignore_user_qs; /* Treat userspace as extended QS or not */ | ||
107 | bool in_user; /* Is the CPU in userland from RCU POV? */ | ||
108 | #endif | ||
105 | }; | 109 | }; |
106 | 110 | ||
107 | /* RCU's kthread states for tracing. */ | 111 | /* RCU's kthread states for tracing. */ |
@@ -196,12 +200,7 @@ struct rcu_node { | |||
196 | /* Refused to boost: not sure why, though. */ | 200 | /* Refused to boost: not sure why, though. */ |
197 | /* This can happen due to race conditions. */ | 201 | /* This can happen due to race conditions. */ |
198 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 202 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
199 | struct task_struct *node_kthread_task; | 203 | raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp; |
200 | /* kthread that takes care of this rcu_node */ | ||
201 | /* structure, for example, awakening the */ | ||
202 | /* per-CPU kthreads as needed. */ | ||
203 | unsigned int node_kthread_status; | ||
204 | /* State of node_kthread_task for tracing. */ | ||
205 | } ____cacheline_internodealigned_in_smp; | 204 | } ____cacheline_internodealigned_in_smp; |
206 | 205 | ||
207 | /* | 206 | /* |
@@ -245,8 +244,6 @@ struct rcu_data { | |||
245 | /* in order to detect GP end. */ | 244 | /* in order to detect GP end. */ |
246 | unsigned long gpnum; /* Highest gp number that this CPU */ | 245 | unsigned long gpnum; /* Highest gp number that this CPU */ |
247 | /* is aware of having started. */ | 246 | /* is aware of having started. */ |
248 | unsigned long passed_quiesce_gpnum; | ||
249 | /* gpnum at time of quiescent state. */ | ||
250 | bool passed_quiesce; /* User-mode/idle loop etc. */ | 247 | bool passed_quiesce; /* User-mode/idle loop etc. */ |
251 | bool qs_pending; /* Core waits for quiesc state. */ | 248 | bool qs_pending; /* Core waits for quiesc state. */ |
252 | bool beenonline; /* CPU online at least once. */ | 249 | bool beenonline; /* CPU online at least once. */ |
@@ -312,11 +309,13 @@ struct rcu_data { | |||
312 | unsigned long n_rp_cpu_needs_gp; | 309 | unsigned long n_rp_cpu_needs_gp; |
313 | unsigned long n_rp_gp_completed; | 310 | unsigned long n_rp_gp_completed; |
314 | unsigned long n_rp_gp_started; | 311 | unsigned long n_rp_gp_started; |
315 | unsigned long n_rp_need_fqs; | ||
316 | unsigned long n_rp_need_nothing; | 312 | unsigned long n_rp_need_nothing; |
317 | 313 | ||
318 | /* 6) _rcu_barrier() callback. */ | 314 | /* 6) _rcu_barrier() and OOM callbacks. */ |
319 | struct rcu_head barrier_head; | 315 | struct rcu_head barrier_head; |
316 | #ifdef CONFIG_RCU_FAST_NO_HZ | ||
317 | struct rcu_head oom_head; | ||
318 | #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | ||
320 | 319 | ||
321 | int cpu; | 320 | int cpu; |
322 | struct rcu_state *rsp; | 321 | struct rcu_state *rsp; |
@@ -375,20 +374,17 @@ struct rcu_state { | |||
375 | 374 | ||
376 | u8 fqs_state ____cacheline_internodealigned_in_smp; | 375 | u8 fqs_state ____cacheline_internodealigned_in_smp; |
377 | /* Force QS state. */ | 376 | /* Force QS state. */ |
378 | u8 fqs_active; /* force_quiescent_state() */ | ||
379 | /* is running. */ | ||
380 | u8 fqs_need_gp; /* A CPU was prevented from */ | ||
381 | /* starting a new grace */ | ||
382 | /* period because */ | ||
383 | /* force_quiescent_state() */ | ||
384 | /* was running. */ | ||
385 | u8 boost; /* Subject to priority boost. */ | 377 | u8 boost; /* Subject to priority boost. */ |
386 | unsigned long gpnum; /* Current gp number. */ | 378 | unsigned long gpnum; /* Current gp number. */ |
387 | unsigned long completed; /* # of last completed gp. */ | 379 | unsigned long completed; /* # of last completed gp. */ |
380 | struct task_struct *gp_kthread; /* Task for grace periods. */ | ||
381 | wait_queue_head_t gp_wq; /* Where GP task waits. */ | ||
382 | int gp_flags; /* Commands for GP task. */ | ||
388 | 383 | ||
389 | /* End of fields guarded by root rcu_node's lock. */ | 384 | /* End of fields guarded by root rcu_node's lock. */ |
390 | 385 | ||
391 | raw_spinlock_t onofflock; /* exclude on/offline and */ | 386 | raw_spinlock_t onofflock ____cacheline_internodealigned_in_smp; |
387 | /* exclude on/offline and */ | ||
392 | /* starting new GP. */ | 388 | /* starting new GP. */ |
393 | struct rcu_head *orphan_nxtlist; /* Orphaned callbacks that */ | 389 | struct rcu_head *orphan_nxtlist; /* Orphaned callbacks that */ |
394 | /* need a grace period. */ | 390 | /* need a grace period. */ |
@@ -398,16 +394,11 @@ struct rcu_state { | |||
398 | struct rcu_head **orphan_donetail; /* Tail of above. */ | 394 | struct rcu_head **orphan_donetail; /* Tail of above. */ |
399 | long qlen_lazy; /* Number of lazy callbacks. */ | 395 | long qlen_lazy; /* Number of lazy callbacks. */ |
400 | long qlen; /* Total number of callbacks. */ | 396 | long qlen; /* Total number of callbacks. */ |
401 | struct task_struct *rcu_barrier_in_progress; | ||
402 | /* Task doing rcu_barrier(), */ | ||
403 | /* or NULL if no barrier. */ | ||
404 | struct mutex barrier_mutex; /* Guards barrier fields. */ | 397 | struct mutex barrier_mutex; /* Guards barrier fields. */ |
405 | atomic_t barrier_cpu_count; /* # CPUs waiting on. */ | 398 | atomic_t barrier_cpu_count; /* # CPUs waiting on. */ |
406 | struct completion barrier_completion; /* Wake at barrier end. */ | 399 | struct completion barrier_completion; /* Wake at barrier end. */ |
407 | unsigned long n_barrier_done; /* ++ at start and end of */ | 400 | unsigned long n_barrier_done; /* ++ at start and end of */ |
408 | /* _rcu_barrier(). */ | 401 | /* _rcu_barrier(). */ |
409 | raw_spinlock_t fqslock; /* Only one task forcing */ | ||
410 | /* quiescent states. */ | ||
411 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | 402 | unsigned long jiffies_force_qs; /* Time at which to invoke */ |
412 | /* force_quiescent_state(). */ | 403 | /* force_quiescent_state(). */ |
413 | unsigned long n_force_qs; /* Number of calls to */ | 404 | unsigned long n_force_qs; /* Number of calls to */ |
@@ -426,6 +417,10 @@ struct rcu_state { | |||
426 | struct list_head flavors; /* List of RCU flavors. */ | 417 | struct list_head flavors; /* List of RCU flavors. */ |
427 | }; | 418 | }; |
428 | 419 | ||
420 | /* Values for rcu_state structure's gp_flags field. */ | ||
421 | #define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */ | ||
422 | #define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */ | ||
423 | |||
429 | extern struct list_head rcu_struct_flavors; | 424 | extern struct list_head rcu_struct_flavors; |
430 | #define for_each_rcu_flavor(rsp) \ | 425 | #define for_each_rcu_flavor(rsp) \ |
431 | list_for_each_entry((rsp), &rcu_struct_flavors, flavors) | 426 | list_for_each_entry((rsp), &rcu_struct_flavors, flavors) |
@@ -468,7 +463,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); | |||
468 | #ifdef CONFIG_HOTPLUG_CPU | 463 | #ifdef CONFIG_HOTPLUG_CPU |
469 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | 464 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, |
470 | unsigned long flags); | 465 | unsigned long flags); |
471 | static void rcu_stop_cpu_kthread(int cpu); | ||
472 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 466 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
473 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); | 467 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); |
474 | static int rcu_print_task_stall(struct rcu_node *rnp); | 468 | static int rcu_print_task_stall(struct rcu_node *rnp); |
@@ -491,15 +485,9 @@ static void invoke_rcu_callbacks_kthread(void); | |||
491 | static bool rcu_is_callbacks_kthread(void); | 485 | static bool rcu_is_callbacks_kthread(void); |
492 | #ifdef CONFIG_RCU_BOOST | 486 | #ifdef CONFIG_RCU_BOOST |
493 | static void rcu_preempt_do_callbacks(void); | 487 | static void rcu_preempt_do_callbacks(void); |
494 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | ||
495 | cpumask_var_t cm); | ||
496 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | 488 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, |
497 | struct rcu_node *rnp, | 489 | struct rcu_node *rnp); |
498 | int rnp_index); | ||
499 | static void invoke_rcu_node_kthread(struct rcu_node *rnp); | ||
500 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg); | ||
501 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 490 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
502 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt); | ||
503 | static void __cpuinit rcu_prepare_kthreads(int cpu); | 491 | static void __cpuinit rcu_prepare_kthreads(int cpu); |
504 | static void rcu_prepare_for_idle_init(int cpu); | 492 | static void rcu_prepare_for_idle_init(int cpu); |
505 | static void rcu_cleanup_after_idle(int cpu); | 493 | static void rcu_cleanup_after_idle(int cpu); |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 7f3244c0df01..f92115488187 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -25,6 +25,8 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/oom.h> | ||
29 | #include <linux/smpboot.h> | ||
28 | 30 | ||
29 | #define RCU_KTHREAD_PRIO 1 | 31 | #define RCU_KTHREAD_PRIO 1 |
30 | 32 | ||
@@ -118,7 +120,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
118 | */ | 120 | */ |
119 | void rcu_force_quiescent_state(void) | 121 | void rcu_force_quiescent_state(void) |
120 | { | 122 | { |
121 | force_quiescent_state(&rcu_preempt_state, 0); | 123 | force_quiescent_state(&rcu_preempt_state); |
122 | } | 124 | } |
123 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); | 125 | EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
124 | 126 | ||
@@ -136,8 +138,6 @@ static void rcu_preempt_qs(int cpu) | |||
136 | { | 138 | { |
137 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 139 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
138 | 140 | ||
139 | rdp->passed_quiesce_gpnum = rdp->gpnum; | ||
140 | barrier(); | ||
141 | if (rdp->passed_quiesce == 0) | 141 | if (rdp->passed_quiesce == 0) |
142 | trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs"); | 142 | trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs"); |
143 | rdp->passed_quiesce = 1; | 143 | rdp->passed_quiesce = 1; |
@@ -422,9 +422,11 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) | |||
422 | unsigned long flags; | 422 | unsigned long flags; |
423 | struct task_struct *t; | 423 | struct task_struct *t; |
424 | 424 | ||
425 | if (!rcu_preempt_blocked_readers_cgp(rnp)) | ||
426 | return; | ||
427 | raw_spin_lock_irqsave(&rnp->lock, flags); | 425 | raw_spin_lock_irqsave(&rnp->lock, flags); |
426 | if (!rcu_preempt_blocked_readers_cgp(rnp)) { | ||
427 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
428 | return; | ||
429 | } | ||
428 | t = list_entry(rnp->gp_tasks, | 430 | t = list_entry(rnp->gp_tasks, |
429 | struct task_struct, rcu_node_entry); | 431 | struct task_struct, rcu_node_entry); |
430 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) | 432 | list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) |
@@ -584,17 +586,23 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
584 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | 586 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ |
585 | } | 587 | } |
586 | 588 | ||
589 | rnp->gp_tasks = NULL; | ||
590 | rnp->exp_tasks = NULL; | ||
587 | #ifdef CONFIG_RCU_BOOST | 591 | #ifdef CONFIG_RCU_BOOST |
588 | /* In case root is being boosted and leaf is not. */ | 592 | rnp->boost_tasks = NULL; |
593 | /* | ||
594 | * In case root is being boosted and leaf was not. Make sure | ||
595 | * that we boost the tasks blocking the current grace period | ||
596 | * in this case. | ||
597 | */ | ||
589 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ | 598 | raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ |
590 | if (rnp_root->boost_tasks != NULL && | 599 | if (rnp_root->boost_tasks != NULL && |
591 | rnp_root->boost_tasks != rnp_root->gp_tasks) | 600 | rnp_root->boost_tasks != rnp_root->gp_tasks && |
601 | rnp_root->boost_tasks != rnp_root->exp_tasks) | ||
592 | rnp_root->boost_tasks = rnp_root->gp_tasks; | 602 | rnp_root->boost_tasks = rnp_root->gp_tasks; |
593 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ | 603 | raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ |
594 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 604 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
595 | 605 | ||
596 | rnp->gp_tasks = NULL; | ||
597 | rnp->exp_tasks = NULL; | ||
598 | return retval; | 606 | return retval; |
599 | } | 607 | } |
600 | 608 | ||
@@ -676,7 +684,7 @@ void synchronize_rcu(void) | |||
676 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 684 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
677 | 685 | ||
678 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); | 686 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); |
679 | static long sync_rcu_preempt_exp_count; | 687 | static unsigned long sync_rcu_preempt_exp_count; |
680 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | 688 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); |
681 | 689 | ||
682 | /* | 690 | /* |
@@ -791,7 +799,7 @@ void synchronize_rcu_expedited(void) | |||
791 | unsigned long flags; | 799 | unsigned long flags; |
792 | struct rcu_node *rnp; | 800 | struct rcu_node *rnp; |
793 | struct rcu_state *rsp = &rcu_preempt_state; | 801 | struct rcu_state *rsp = &rcu_preempt_state; |
794 | long snap; | 802 | unsigned long snap; |
795 | int trycount = 0; | 803 | int trycount = 0; |
796 | 804 | ||
797 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ | 805 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ |
@@ -799,33 +807,47 @@ void synchronize_rcu_expedited(void) | |||
799 | smp_mb(); /* Above access cannot bleed into critical section. */ | 807 | smp_mb(); /* Above access cannot bleed into critical section. */ |
800 | 808 | ||
801 | /* | 809 | /* |
810 | * Block CPU-hotplug operations. This means that any CPU-hotplug | ||
811 | * operation that finds an rcu_node structure with tasks in the | ||
812 | * process of being boosted will know that all tasks blocking | ||
813 | * this expedited grace period will already be in the process of | ||
814 | * being boosted. This simplifies the process of moving tasks | ||
815 | * from leaf to root rcu_node structures. | ||
816 | */ | ||
817 | get_online_cpus(); | ||
818 | |||
819 | /* | ||
802 | * Acquire lock, falling back to synchronize_rcu() if too many | 820 | * Acquire lock, falling back to synchronize_rcu() if too many |
803 | * lock-acquisition failures. Of course, if someone does the | 821 | * lock-acquisition failures. Of course, if someone does the |
804 | * expedited grace period for us, just leave. | 822 | * expedited grace period for us, just leave. |
805 | */ | 823 | */ |
806 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { | 824 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { |
825 | if (ULONG_CMP_LT(snap, | ||
826 | ACCESS_ONCE(sync_rcu_preempt_exp_count))) { | ||
827 | put_online_cpus(); | ||
828 | goto mb_ret; /* Others did our work for us. */ | ||
829 | } | ||
807 | if (trycount++ < 10) { | 830 | if (trycount++ < 10) { |
808 | udelay(trycount * num_online_cpus()); | 831 | udelay(trycount * num_online_cpus()); |
809 | } else { | 832 | } else { |
833 | put_online_cpus(); | ||
810 | synchronize_rcu(); | 834 | synchronize_rcu(); |
811 | return; | 835 | return; |
812 | } | 836 | } |
813 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
814 | goto mb_ret; /* Others did our work for us. */ | ||
815 | } | 837 | } |
816 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | 838 | if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) { |
839 | put_online_cpus(); | ||
817 | goto unlock_mb_ret; /* Others did our work for us. */ | 840 | goto unlock_mb_ret; /* Others did our work for us. */ |
841 | } | ||
818 | 842 | ||
819 | /* force all RCU readers onto ->blkd_tasks lists. */ | 843 | /* force all RCU readers onto ->blkd_tasks lists. */ |
820 | synchronize_sched_expedited(); | 844 | synchronize_sched_expedited(); |
821 | 845 | ||
822 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | ||
823 | |||
824 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ | 846 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ |
825 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { | 847 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { |
826 | raw_spin_lock(&rnp->lock); /* irqs already disabled. */ | 848 | raw_spin_lock_irqsave(&rnp->lock, flags); |
827 | rnp->expmask = rnp->qsmaskinit; | 849 | rnp->expmask = rnp->qsmaskinit; |
828 | raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 850 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
829 | } | 851 | } |
830 | 852 | ||
831 | /* Snapshot current state of ->blkd_tasks lists. */ | 853 | /* Snapshot current state of ->blkd_tasks lists. */ |
@@ -834,7 +856,7 @@ void synchronize_rcu_expedited(void) | |||
834 | if (NUM_RCU_NODES > 1) | 856 | if (NUM_RCU_NODES > 1) |
835 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); | 857 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); |
836 | 858 | ||
837 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | 859 | put_online_cpus(); |
838 | 860 | ||
839 | /* Wait for snapshotted ->blkd_tasks lists to drain. */ | 861 | /* Wait for snapshotted ->blkd_tasks lists to drain. */ |
840 | rnp = rcu_get_root(rsp); | 862 | rnp = rcu_get_root(rsp); |
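synchronize_rcu_expedited() now snapshots sync_rcu_preempt_exp_count before competing for the mutex and, at each back-off point, uses ULONG_CMP_LT() to see whether the counter has already advanced past the snapshot; if so, somebody else's expedited pass covered this request and it can simply return. After ten failed trylock attempts it falls back to synchronize_rcu(). The sketch below captures that snapshot-and-check pattern in user-space C; the snapshot rule is simplified, the wraparound-safe comparison only mimics the kernel macro, and every name is illustrative.

/* Sketch: "did somebody already do our work?" via a completion counter. */
#include <pthread.h>
#include <stdio.h>

/* True if a < b in modular (wraparound-safe) arithmetic. */
#define ULONG_CMP_LT(a, b) \
    ((unsigned long)(a) - (unsigned long)(b) > (~0UL >> 1))

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long exp_count;     /* bumped once per completed pass */

static void do_expedited_work(void)
{
    puts("doing the expedited pass");
}

static void expedited(void)
{
    unsigned long snap = exp_count; /* simplified snapshot */
    int trycount = 0;

    while (pthread_mutex_trylock(&exp_mutex) != 0) {
        if (ULONG_CMP_LT(snap, exp_count))
            return;                 /* others did our work for us */
        if (trycount++ >= 10)
            return;                 /* the kernel falls back to synchronize_rcu() */
    }
    if (ULONG_CMP_LT(snap, exp_count)) {
        pthread_mutex_unlock(&exp_mutex);
        return;                     /* others did our work for us */
    }
    do_expedited_work();
    exp_count++;                    /* advertise completion to later callers */
    pthread_mutex_unlock(&exp_mutex);
}

int main(void)
{
    expedited();
    return 0;
}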
@@ -1069,6 +1091,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp) | |||
1069 | 1091 | ||
1070 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | 1092 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ |
1071 | 1093 | ||
1094 | static void rcu_wake_cond(struct task_struct *t, int status) | ||
1095 | { | ||
1096 | /* | ||
1097 | * If the thread is yielding, only wake it when this | ||
1098 | * is invoked from idle | ||
1099 | */ | ||
1100 | if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) | ||
1101 | wake_up_process(t); | ||
1102 | } | ||
1103 | |||
1072 | /* | 1104 | /* |
1073 | * Carry out RCU priority boosting on the task indicated by ->exp_tasks | 1105 | * Carry out RCU priority boosting on the task indicated by ->exp_tasks |
1074 | * or ->boost_tasks, advancing the pointer to the next task in the | 1106 | * or ->boost_tasks, advancing the pointer to the next task in the |
@@ -1141,17 +1173,6 @@ static int rcu_boost(struct rcu_node *rnp) | |||
1141 | } | 1173 | } |
1142 | 1174 | ||
1143 | /* | 1175 | /* |
1144 | * Timer handler to initiate waking up of boost kthreads that | ||
1145 | * have yielded the CPU due to excessive numbers of tasks to | ||
1146 | * boost. We wake up the per-rcu_node kthread, which in turn | ||
1147 | * will wake up the booster kthread. | ||
1148 | */ | ||
1149 | static void rcu_boost_kthread_timer(unsigned long arg) | ||
1150 | { | ||
1151 | invoke_rcu_node_kthread((struct rcu_node *)arg); | ||
1152 | } | ||
1153 | |||
1154 | /* | ||
1155 | * Priority-boosting kthread. One per leaf rcu_node and one for the | 1176 | * Priority-boosting kthread. One per leaf rcu_node and one for the |
1156 | * root rcu_node. | 1177 | * root rcu_node. |
1157 | */ | 1178 | */ |
@@ -1174,8 +1195,9 @@ static int rcu_boost_kthread(void *arg) | |||
1174 | else | 1195 | else |
1175 | spincnt = 0; | 1196 | spincnt = 0; |
1176 | if (spincnt > 10) { | 1197 | if (spincnt > 10) { |
1198 | rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; | ||
1177 | trace_rcu_utilization("End boost kthread@rcu_yield"); | 1199 | trace_rcu_utilization("End boost kthread@rcu_yield"); |
1178 | rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp); | 1200 | schedule_timeout_interruptible(2); |
1179 | trace_rcu_utilization("Start boost kthread@rcu_yield"); | 1201 | trace_rcu_utilization("Start boost kthread@rcu_yield"); |
1180 | spincnt = 0; | 1202 | spincnt = 0; |
1181 | } | 1203 | } |
@@ -1191,9 +1213,9 @@ static int rcu_boost_kthread(void *arg) | |||
1191 | * kthread to start boosting them. If there is an expedited grace | 1213 | * kthread to start boosting them. If there is an expedited grace |
1192 | * period in progress, it is always time to boost. | 1214 | * period in progress, it is always time to boost. |
1193 | * | 1215 | * |
1194 | * The caller must hold rnp->lock, which this function releases, | 1216 | * The caller must hold rnp->lock, which this function releases. |
1195 | * but irqs remain disabled. The ->boost_kthread_task is immortal, | 1217 | * The ->boost_kthread_task is immortal, so we don't need to worry |
1196 | * so we don't need to worry about it going away. | 1218 | * about it going away. |
1197 | */ | 1219 | */ |
1198 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) | 1220 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
1199 | { | 1221 | { |
@@ -1213,8 +1235,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) | |||
1213 | rnp->boost_tasks = rnp->gp_tasks; | 1235 | rnp->boost_tasks = rnp->gp_tasks; |
1214 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1236 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1215 | t = rnp->boost_kthread_task; | 1237 | t = rnp->boost_kthread_task; |
1216 | if (t != NULL) | 1238 | if (t) |
1217 | wake_up_process(t); | 1239 | rcu_wake_cond(t, rnp->boost_kthread_status); |
1218 | } else { | 1240 | } else { |
1219 | rcu_initiate_boost_trace(rnp); | 1241 | rcu_initiate_boost_trace(rnp); |
1220 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1242 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -1231,8 +1253,10 @@ static void invoke_rcu_callbacks_kthread(void) | |||
1231 | local_irq_save(flags); | 1253 | local_irq_save(flags); |
1232 | __this_cpu_write(rcu_cpu_has_work, 1); | 1254 | __this_cpu_write(rcu_cpu_has_work, 1); |
1233 | if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && | 1255 | if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && |
1234 | current != __this_cpu_read(rcu_cpu_kthread_task)) | 1256 | current != __this_cpu_read(rcu_cpu_kthread_task)) { |
1235 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | 1257 | rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), |
1258 | __this_cpu_read(rcu_cpu_kthread_status)); | ||
1259 | } | ||
1236 | local_irq_restore(flags); | 1260 | local_irq_restore(flags); |
1237 | } | 1261 | } |
1238 | 1262 | ||
@@ -1245,21 +1269,6 @@ static bool rcu_is_callbacks_kthread(void) | |||
1245 | return __get_cpu_var(rcu_cpu_kthread_task) == current; | 1269 | return __get_cpu_var(rcu_cpu_kthread_task) == current; |
1246 | } | 1270 | } |
1247 | 1271 | ||
1248 | /* | ||
1249 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | ||
1250 | * held, so no one should be messing with the existence of the boost | ||
1251 | * kthread. | ||
1252 | */ | ||
1253 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | ||
1254 | cpumask_var_t cm) | ||
1255 | { | ||
1256 | struct task_struct *t; | ||
1257 | |||
1258 | t = rnp->boost_kthread_task; | ||
1259 | if (t != NULL) | ||
1260 | set_cpus_allowed_ptr(rnp->boost_kthread_task, cm); | ||
1261 | } | ||
1262 | |||
1263 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | 1272 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) |
1264 | 1273 | ||
1265 | /* | 1274 | /* |
@@ -1276,15 +1285,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |||
1276 | * Returns zero if all is well, a negated errno otherwise. | 1285 | * Returns zero if all is well, a negated errno otherwise. |
1277 | */ | 1286 | */ |
1278 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | 1287 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, |
1279 | struct rcu_node *rnp, | 1288 | struct rcu_node *rnp) |
1280 | int rnp_index) | ||
1281 | { | 1289 | { |
1290 | int rnp_index = rnp - &rsp->node[0]; | ||
1282 | unsigned long flags; | 1291 | unsigned long flags; |
1283 | struct sched_param sp; | 1292 | struct sched_param sp; |
1284 | struct task_struct *t; | 1293 | struct task_struct *t; |
1285 | 1294 | ||
1286 | if (&rcu_preempt_state != rsp) | 1295 | if (&rcu_preempt_state != rsp) |
1287 | return 0; | 1296 | return 0; |
1297 | |||
1298 | if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0) | ||
1299 | return 0; | ||
1300 | |||
1288 | rsp->boost = 1; | 1301 | rsp->boost = 1; |
1289 | if (rnp->boost_kthread_task != NULL) | 1302 | if (rnp->boost_kthread_task != NULL) |
1290 | return 0; | 1303 | return 0; |
@@ -1301,25 +1314,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |||
1301 | return 0; | 1314 | return 0; |
1302 | } | 1315 | } |
1303 | 1316 | ||
1304 | #ifdef CONFIG_HOTPLUG_CPU | ||
1305 | |||
1306 | /* | ||
1307 | * Stop the RCU's per-CPU kthread when its CPU goes offline. | ||
1308 | */ | ||
1309 | static void rcu_stop_cpu_kthread(int cpu) | ||
1310 | { | ||
1311 | struct task_struct *t; | ||
1312 | |||
1313 | /* Stop the CPU's kthread. */ | ||
1314 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1315 | if (t != NULL) { | ||
1316 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | ||
1317 | kthread_stop(t); | ||
1318 | } | ||
1319 | } | ||
1320 | |||
1321 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1322 | |||
1323 | static void rcu_kthread_do_work(void) | 1317 | static void rcu_kthread_do_work(void) |
1324 | { | 1318 | { |
1325 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | 1319 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); |
@@ -1327,112 +1321,22 @@ static void rcu_kthread_do_work(void) | |||
1327 | rcu_preempt_do_callbacks(); | 1321 | rcu_preempt_do_callbacks(); |
1328 | } | 1322 | } |
1329 | 1323 | ||
1330 | /* | 1324 | static void rcu_cpu_kthread_setup(unsigned int cpu) |
1331 | * Wake up the specified per-rcu_node-structure kthread. | ||
1332 | * Because the per-rcu_node kthreads are immortal, we don't need | ||
1333 | * to do anything to keep them alive. | ||
1334 | */ | ||
1335 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | ||
1336 | { | ||
1337 | struct task_struct *t; | ||
1338 | |||
1339 | t = rnp->node_kthread_task; | ||
1340 | if (t != NULL) | ||
1341 | wake_up_process(t); | ||
1342 | } | ||
1343 | |||
1344 | /* | ||
1345 | * Set the specified CPU's kthread to run RT or not, as specified by | ||
1346 | * the to_rt argument. The CPU-hotplug locks are held, so the task | ||
1347 | * is not going away. | ||
1348 | */ | ||
1349 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
1350 | { | 1325 | { |
1351 | int policy; | ||
1352 | struct sched_param sp; | 1326 | struct sched_param sp; |
1353 | struct task_struct *t; | ||
1354 | 1327 | ||
1355 | t = per_cpu(rcu_cpu_kthread_task, cpu); | 1328 | sp.sched_priority = RCU_KTHREAD_PRIO; |
1356 | if (t == NULL) | 1329 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); |
1357 | return; | ||
1358 | if (to_rt) { | ||
1359 | policy = SCHED_FIFO; | ||
1360 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1361 | } else { | ||
1362 | policy = SCHED_NORMAL; | ||
1363 | sp.sched_priority = 0; | ||
1364 | } | ||
1365 | sched_setscheduler_nocheck(t, policy, &sp); | ||
1366 | } | 1330 | } |
1367 | 1331 | ||
1368 | /* | 1332 | static void rcu_cpu_kthread_park(unsigned int cpu) |
1369 | * Timer handler to initiate the waking up of per-CPU kthreads that | ||
1370 | * have yielded the CPU due to excess numbers of RCU callbacks. | ||
1371 | * We wake up the per-rcu_node kthread, which in turn will wake up | ||
1372 | * the booster kthread. | ||
1373 | */ | ||
1374 | static void rcu_cpu_kthread_timer(unsigned long arg) | ||
1375 | { | 1333 | { |
1376 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | 1334 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; |
1377 | struct rcu_node *rnp = rdp->mynode; | ||
1378 | |||
1379 | atomic_or(rdp->grpmask, &rnp->wakemask); | ||
1380 | invoke_rcu_node_kthread(rnp); | ||
1381 | } | 1335 | } |
1382 | 1336 | ||
1383 | /* | 1337 | static int rcu_cpu_kthread_should_run(unsigned int cpu) |
1384 | * Drop to non-real-time priority and yield, but only after posting a | ||
1385 | * timer that will cause us to regain our real-time priority if we | ||
1386 | * remain preempted. Either way, we restore our real-time priority | ||
1387 | * before returning. | ||
1388 | */ | ||
1389 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | ||
1390 | { | 1338 | { |
1391 | struct sched_param sp; | 1339 | return __get_cpu_var(rcu_cpu_has_work); |
1392 | struct timer_list yield_timer; | ||
1393 | int prio = current->rt_priority; | ||
1394 | |||
1395 | setup_timer_on_stack(&yield_timer, f, arg); | ||
1396 | mod_timer(&yield_timer, jiffies + 2); | ||
1397 | sp.sched_priority = 0; | ||
1398 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | ||
1399 | set_user_nice(current, 19); | ||
1400 | schedule(); | ||
1401 | set_user_nice(current, 0); | ||
1402 | sp.sched_priority = prio; | ||
1403 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | ||
1404 | del_timer(&yield_timer); | ||
1405 | } | ||
1406 | |||
1407 | /* | ||
1408 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | ||
1409 | * This can happen while the corresponding CPU is either coming online | ||
1410 | * or going offline. We cannot wait until the CPU is fully online | ||
1411 | * before starting the kthread, because the various notifier functions | ||
1412 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | ||
1413 | * the corresponding CPU is online. | ||
1414 | * | ||
1415 | * Return 1 if the kthread needs to stop, 0 otherwise. | ||
1416 | * | ||
1417 | * Caller must disable bh. This function can momentarily enable it. | ||
1418 | */ | ||
1419 | static int rcu_cpu_kthread_should_stop(int cpu) | ||
1420 | { | ||
1421 | while (cpu_is_offline(cpu) || | ||
1422 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | ||
1423 | smp_processor_id() != cpu) { | ||
1424 | if (kthread_should_stop()) | ||
1425 | return 1; | ||
1426 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | ||
1427 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | ||
1428 | local_bh_enable(); | ||
1429 | schedule_timeout_uninterruptible(1); | ||
1430 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | ||
1431 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
1432 | local_bh_disable(); | ||
1433 | } | ||
1434 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1435 | return 0; | ||
1436 | } | 1340 | } |
1437 | 1341 | ||
1438 | /* | 1342 | /* |
@@ -1440,138 +1344,35 @@ static int rcu_cpu_kthread_should_stop(int cpu) | |||
1440 | * RCU softirq used in flavors and configurations of RCU that do not | 1344 | * RCU softirq used in flavors and configurations of RCU that do not |
1441 | * support RCU priority boosting. | 1345 | * support RCU priority boosting. |
1442 | */ | 1346 | */ |
1443 | static int rcu_cpu_kthread(void *arg) | 1347 | static void rcu_cpu_kthread(unsigned int cpu) |
1444 | { | 1348 | { |
1445 | int cpu = (int)(long)arg; | 1349 | unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status); |
1446 | unsigned long flags; | 1350 | char work, *workp = &__get_cpu_var(rcu_cpu_has_work); |
1447 | int spincnt = 0; | 1351 | int spincnt; |
1448 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | ||
1449 | char work; | ||
1450 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | ||
1451 | 1352 | ||
1452 | trace_rcu_utilization("Start CPU kthread@init"); | 1353 | for (spincnt = 0; spincnt < 10; spincnt++) { |
1453 | for (;;) { | ||
1454 | *statusp = RCU_KTHREAD_WAITING; | ||
1455 | trace_rcu_utilization("End CPU kthread@rcu_wait"); | ||
1456 | rcu_wait(*workp != 0 || kthread_should_stop()); | ||
1457 | trace_rcu_utilization("Start CPU kthread@rcu_wait"); | 1354 | trace_rcu_utilization("Start CPU kthread@rcu_wait"); |
1458 | local_bh_disable(); | 1355 | local_bh_disable(); |
1459 | if (rcu_cpu_kthread_should_stop(cpu)) { | ||
1460 | local_bh_enable(); | ||
1461 | break; | ||
1462 | } | ||
1463 | *statusp = RCU_KTHREAD_RUNNING; | 1356 | *statusp = RCU_KTHREAD_RUNNING; |
1464 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | 1357 | this_cpu_inc(rcu_cpu_kthread_loops); |
1465 | local_irq_save(flags); | 1358 | local_irq_disable(); |
1466 | work = *workp; | 1359 | work = *workp; |
1467 | *workp = 0; | 1360 | *workp = 0; |
1468 | local_irq_restore(flags); | 1361 | local_irq_enable(); |
1469 | if (work) | 1362 | if (work) |
1470 | rcu_kthread_do_work(); | 1363 | rcu_kthread_do_work(); |
1471 | local_bh_enable(); | 1364 | local_bh_enable(); |
1472 | if (*workp != 0) | 1365 | if (*workp == 0) { |
1473 | spincnt++; | 1366 | trace_rcu_utilization("End CPU kthread@rcu_wait"); |
1474 | else | 1367 | *statusp = RCU_KTHREAD_WAITING; |
1475 | spincnt = 0; | 1368 | return; |
1476 | if (spincnt > 10) { | ||
1477 | *statusp = RCU_KTHREAD_YIELDING; | ||
1478 | trace_rcu_utilization("End CPU kthread@rcu_yield"); | ||
1479 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | ||
1480 | trace_rcu_utilization("Start CPU kthread@rcu_yield"); | ||
1481 | spincnt = 0; | ||
1482 | } | ||
1483 | } | ||
1484 | *statusp = RCU_KTHREAD_STOPPED; | ||
1485 | trace_rcu_utilization("End CPU kthread@term"); | ||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | /* | ||
1490 | * Spawn a per-CPU kthread, setting up affinity and priority. | ||
1491 | * Because the CPU hotplug lock is held, no other CPU will be attempting | ||
1492 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | ||
1493 | * attempting to access it during boot, but the locking in kthread_bind() | ||
1494 | * will enforce sufficient ordering. | ||
1495 | * | ||
1496 | * Please note that we cannot simply refuse to wake up the per-CPU | ||
1497 | * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, | ||
1498 | * which can result in softlockup complaints if the task ends up being | ||
1499 | * idle for more than a couple of minutes. | ||
1500 | * | ||
1501 | * However, please note also that we cannot bind the per-CPU kthread to its | ||
1502 | * CPU until that CPU is fully online. We also cannot wait until the | ||
1503 | * CPU is fully online before we create its per-CPU kthread, as this would | ||
1504 | * deadlock the system when CPU notifiers tried waiting for grace | ||
1505 | * periods. So we bind the per-CPU kthread to its CPU only if the CPU | ||
1506 | * is online. If its CPU is not yet fully online, then the code in | ||
1507 | * rcu_cpu_kthread() will wait until it is fully online, and then do | ||
1508 | * the binding. | ||
1509 | */ | ||
1510 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | ||
1511 | { | ||
1512 | struct sched_param sp; | ||
1513 | struct task_struct *t; | ||
1514 | |||
1515 | if (!rcu_scheduler_fully_active || | ||
1516 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) | ||
1517 | return 0; | ||
1518 | t = kthread_create_on_node(rcu_cpu_kthread, | ||
1519 | (void *)(long)cpu, | ||
1520 | cpu_to_node(cpu), | ||
1521 | "rcuc/%d", cpu); | ||
1522 | if (IS_ERR(t)) | ||
1523 | return PTR_ERR(t); | ||
1524 | if (cpu_online(cpu)) | ||
1525 | kthread_bind(t, cpu); | ||
1526 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1527 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | ||
1528 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1529 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1530 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | ||
1531 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ | ||
1532 | return 0; | ||
1533 | } | ||
1534 | |||
1535 | /* | ||
1536 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | ||
1537 | * kthreads when needed. We ignore requests to wake up kthreads | ||
1538 | * for offline CPUs, which is OK because force_quiescent_state() | ||
1539 | * takes care of this case. | ||
1540 | */ | ||
1541 | static int rcu_node_kthread(void *arg) | ||
1542 | { | ||
1543 | int cpu; | ||
1544 | unsigned long flags; | ||
1545 | unsigned long mask; | ||
1546 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
1547 | struct sched_param sp; | ||
1548 | struct task_struct *t; | ||
1549 | |||
1550 | for (;;) { | ||
1551 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | ||
1552 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | ||
1553 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | ||
1554 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1555 | mask = atomic_xchg(&rnp->wakemask, 0); | ||
1556 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
1557 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | ||
1558 | if ((mask & 0x1) == 0) | ||
1559 | continue; | ||
1560 | preempt_disable(); | ||
1561 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1562 | if (!cpu_online(cpu) || t == NULL) { | ||
1563 | preempt_enable(); | ||
1564 | continue; | ||
1565 | } | ||
1566 | per_cpu(rcu_cpu_has_work, cpu) = 1; | ||
1567 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1568 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1569 | preempt_enable(); | ||
1570 | } | 1369 | } |
1571 | } | 1370 | } |
1572 | /* NOTREACHED */ | 1371 | *statusp = RCU_KTHREAD_YIELDING; |
1573 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | 1372 | trace_rcu_utilization("Start CPU kthread@rcu_yield"); |
1574 | return 0; | 1373 | schedule_timeout_interruptible(2); |
1374 | trace_rcu_utilization("End CPU kthread@rcu_yield"); | ||
1375 | *statusp = RCU_KTHREAD_WAITING; | ||
1575 | } | 1376 | } |
1576 | 1377 | ||
1577 | /* | 1378 | /* |
@@ -1583,17 +1384,17 @@ static int rcu_node_kthread(void *arg) | |||
1583 | * no outgoing CPU. If there are no CPUs left in the affinity set, | 1384 | * no outgoing CPU. If there are no CPUs left in the affinity set, |
1584 | * this function allows the kthread to execute on any CPU. | 1385 | * this function allows the kthread to execute on any CPU. |
1585 | */ | 1386 | */ |
1586 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | 1387 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) |
1587 | { | 1388 | { |
1389 | struct task_struct *t = rnp->boost_kthread_task; | ||
1390 | unsigned long mask = rnp->qsmaskinit; | ||
1588 | cpumask_var_t cm; | 1391 | cpumask_var_t cm; |
1589 | int cpu; | 1392 | int cpu; |
1590 | unsigned long mask = rnp->qsmaskinit; | ||
1591 | 1393 | ||
1592 | if (rnp->node_kthread_task == NULL) | 1394 | if (!t) |
1593 | return; | 1395 | return; |
1594 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | 1396 | if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) |
1595 | return; | 1397 | return; |
1596 | cpumask_clear(cm); | ||
1597 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | 1398 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) |
1598 | if ((mask & 0x1) && cpu != outgoingcpu) | 1399 | if ((mask & 0x1) && cpu != outgoingcpu) |
1599 | cpumask_set_cpu(cpu, cm); | 1400 | cpumask_set_cpu(cpu, cm); |
@@ -1603,62 +1404,36 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | |||
1603 | cpumask_clear_cpu(cpu, cm); | 1404 | cpumask_clear_cpu(cpu, cm); |
1604 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | 1405 | WARN_ON_ONCE(cpumask_weight(cm) == 0); |
1605 | } | 1406 | } |
1606 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | 1407 | set_cpus_allowed_ptr(t, cm); |
1607 | rcu_boost_kthread_setaffinity(rnp, cm); | ||
1608 | free_cpumask_var(cm); | 1408 | free_cpumask_var(cm); |
1609 | } | 1409 | } |
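The reworked rcu_boost_kthread_setaffinity() above follows the common cpumask_var_t idiom: allocate a zeroed mask with zalloc_cpumask_var(), fill in the allowed CPUs, apply the mask with set_cpus_allowed_ptr(), and free it. A rough sketch of the same idiom is shown below; demo_restrict_task() and its even-CPU policy are invented for illustration.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

/* Illustrative only: pin a task to the even-numbered online CPUs. */
static void demo_restrict_task(struct task_struct *t)
{
        cpumask_var_t cm;
        int cpu;

        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))       /* zeroed on success */
                return;
        for_each_online_cpu(cpu)
                if ((cpu & 0x1) == 0)
                        cpumask_set_cpu(cpu, cm);
        if (!cpumask_empty(cm))
                set_cpus_allowed_ptr(t, cm);
        free_cpumask_var(cm);
}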
1610 | 1410 | ||
1611 | /* | 1411 | static struct smp_hotplug_thread rcu_cpu_thread_spec = { |
1612 | * Spawn a per-rcu_node kthread, setting priority and affinity. | 1412 | .store = &rcu_cpu_kthread_task, |
1613 | * Called during boot before online/offline can happen, or, if | 1413 | .thread_should_run = rcu_cpu_kthread_should_run, |
1614 | * during runtime, with the main CPU-hotplug locks held. So only | 1414 | .thread_fn = rcu_cpu_kthread, |
1615 | * one of these can be executing at a time. | 1415 | .thread_comm = "rcuc/%u", |
1616 | */ | 1416 | .setup = rcu_cpu_kthread_setup, |
1617 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | 1417 | .park = rcu_cpu_kthread_park, |
1618 | struct rcu_node *rnp) | 1418 | }; |
1619 | { | ||
1620 | unsigned long flags; | ||
1621 | int rnp_index = rnp - &rsp->node[0]; | ||
1622 | struct sched_param sp; | ||
1623 | struct task_struct *t; | ||
1624 | |||
1625 | if (!rcu_scheduler_fully_active || | ||
1626 | rnp->qsmaskinit == 0) | ||
1627 | return 0; | ||
1628 | if (rnp->node_kthread_task == NULL) { | ||
1629 | t = kthread_create(rcu_node_kthread, (void *)rnp, | ||
1630 | "rcun/%d", rnp_index); | ||
1631 | if (IS_ERR(t)) | ||
1632 | return PTR_ERR(t); | ||
1633 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1634 | rnp->node_kthread_task = t; | ||
1635 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1636 | sp.sched_priority = 99; | ||
1637 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1638 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | ||
1639 | } | ||
1640 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | ||
1641 | } | ||
1642 | 1419 | ||
1643 | /* | 1420 | /* |
1644 | * Spawn all kthreads -- called as soon as the scheduler is running. | 1421 | * Spawn all kthreads -- called as soon as the scheduler is running. |
1645 | */ | 1422 | */ |
1646 | static int __init rcu_spawn_kthreads(void) | 1423 | static int __init rcu_spawn_kthreads(void) |
1647 | { | 1424 | { |
1648 | int cpu; | ||
1649 | struct rcu_node *rnp; | 1425 | struct rcu_node *rnp; |
1426 | int cpu; | ||
1650 | 1427 | ||
1651 | rcu_scheduler_fully_active = 1; | 1428 | rcu_scheduler_fully_active = 1; |
1652 | for_each_possible_cpu(cpu) { | 1429 | for_each_possible_cpu(cpu) |
1653 | per_cpu(rcu_cpu_has_work, cpu) = 0; | 1430 | per_cpu(rcu_cpu_has_work, cpu) = 0; |
1654 | if (cpu_online(cpu)) | 1431 | BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); |
1655 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
1656 | } | ||
1657 | rnp = rcu_get_root(rcu_state); | 1432 | rnp = rcu_get_root(rcu_state); |
1658 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | 1433 | (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); |
1659 | if (NUM_RCU_NODES > 1) { | 1434 | if (NUM_RCU_NODES > 1) { |
1660 | rcu_for_each_leaf_node(rcu_state, rnp) | 1435 | rcu_for_each_leaf_node(rcu_state, rnp) |
1661 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | 1436 | (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); |
1662 | } | 1437 | } |
1663 | return 0; | 1438 | return 0; |
1664 | } | 1439 | } |
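The conversion above retires the hand-rolled per-CPU and per-node kthread spawning in favor of the generic smpboot infrastructure: the smp_hotplug_thread descriptor supplies the should-run predicate, the work function, and the setup/park hooks, and smpboot_register_percpu_thread() creates one thread per possible CPU and follows hotplug transitions automatically. A bare-bones sketch of registering such a thread follows; all demo_* names are invented for illustration.

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(int, demo_has_work);

static int demo_should_run(unsigned int cpu)
{
        return __this_cpu_read(demo_has_work);  /* run only when flagged */
}

static void demo_thread_fn(unsigned int cpu)
{
        __this_cpu_write(demo_has_work, 0);
        /* per-CPU work would go here */
}

static struct smp_hotplug_thread demo_threads = {
        .store                  = &demo_task,
        .thread_should_run      = demo_should_run,
        .thread_fn              = demo_thread_fn,
        .thread_comm            = "demo/%u",
};

static int __init demo_threads_init(void)
{
        /* One "demo/N" kthread per possible CPU, parked across hotplug. */
        return smpboot_register_percpu_thread(&demo_threads);
}
early_initcall(demo_threads_init);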
@@ -1670,11 +1445,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu) | |||
1670 | struct rcu_node *rnp = rdp->mynode; | 1445 | struct rcu_node *rnp = rdp->mynode; |
1671 | 1446 | ||
1672 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | 1447 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ |
1673 | if (rcu_scheduler_fully_active) { | 1448 | if (rcu_scheduler_fully_active) |
1674 | (void)rcu_spawn_one_cpu_kthread(cpu); | 1449 | (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); |
1675 | if (rnp->node_kthread_task == NULL) | ||
1676 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1677 | } | ||
1678 | } | 1450 | } |
1679 | 1451 | ||
1680 | #else /* #ifdef CONFIG_RCU_BOOST */ | 1452 | #else /* #ifdef CONFIG_RCU_BOOST */ |
@@ -1698,19 +1470,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | |||
1698 | { | 1470 | { |
1699 | } | 1471 | } |
1700 | 1472 | ||
1701 | #ifdef CONFIG_HOTPLUG_CPU | 1473 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) |
1702 | |||
1703 | static void rcu_stop_cpu_kthread(int cpu) | ||
1704 | { | ||
1705 | } | ||
1706 | |||
1707 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1708 | |||
1709 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
1710 | { | ||
1711 | } | ||
1712 | |||
1713 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
1714 | { | 1474 | { |
1715 | } | 1475 | } |
1716 | 1476 | ||
@@ -1997,6 +1757,26 @@ static void rcu_prepare_for_idle(int cpu) | |||
1997 | if (!tne) | 1757 | if (!tne) |
1998 | return; | 1758 | return; |
1999 | 1759 | ||
1760 | /* Adaptive-tick mode, where usermode execution is idle to RCU. */ | ||
1761 | if (!is_idle_task(current)) { | ||
1762 | rdtp->dyntick_holdoff = jiffies - 1; | ||
1763 | if (rcu_cpu_has_nonlazy_callbacks(cpu)) { | ||
1764 | trace_rcu_prep_idle("User dyntick with callbacks"); | ||
1765 | rdtp->idle_gp_timer_expires = | ||
1766 | round_up(jiffies + RCU_IDLE_GP_DELAY, | ||
1767 | RCU_IDLE_GP_DELAY); | ||
1768 | } else if (rcu_cpu_has_callbacks(cpu)) { | ||
1769 | rdtp->idle_gp_timer_expires = | ||
1770 | round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY); | ||
1771 | trace_rcu_prep_idle("User dyntick with lazy callbacks"); | ||
1772 | } else { | ||
1773 | return; | ||
1774 | } | ||
1775 | tp = &rdtp->idle_gp_timer; | ||
1776 | mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); | ||
1777 | return; | ||
1778 | } | ||
1779 | |||
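The adaptive-tick branch added above arms the per-CPU idle_gp_timer with an expiry coarsened by round_up()/round_jiffies(), and pins it to the current CPU with mod_timer_pinned(). A small sketch of that timer idiom follows; demo_timer, its handler, and the four-second delay are invented for illustration.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_timer_fn(unsigned long data)
{
        pr_info("demo timer fired\n");
}

static void demo_arm_timer(void)
{
        setup_timer(&demo_timer, demo_timer_fn, 0);
        /*
         * round_jiffies() pushes the expiry to a whole-second boundary so
         * that many such timers fire together; mod_timer_pinned() keeps
         * the timer on the CPU that armed it.
         */
        mod_timer_pinned(&demo_timer, round_jiffies(jiffies + 4 * HZ));
}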
2000 | /* | 1780 | /* |
2001 | * If this is an idle re-entry, for example, due to use of | 1781 | * If this is an idle re-entry, for example, due to use of |
2002 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle | 1782 | * RCU_NONIDLE() or the new idle-loop tracing API within the idle |
@@ -2075,16 +1855,16 @@ static void rcu_prepare_for_idle(int cpu) | |||
2075 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1855 | #ifdef CONFIG_TREE_PREEMPT_RCU |
2076 | if (per_cpu(rcu_preempt_data, cpu).nxtlist) { | 1856 | if (per_cpu(rcu_preempt_data, cpu).nxtlist) { |
2077 | rcu_preempt_qs(cpu); | 1857 | rcu_preempt_qs(cpu); |
2078 | force_quiescent_state(&rcu_preempt_state, 0); | 1858 | force_quiescent_state(&rcu_preempt_state); |
2079 | } | 1859 | } |
2080 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1860 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
2081 | if (per_cpu(rcu_sched_data, cpu).nxtlist) { | 1861 | if (per_cpu(rcu_sched_data, cpu).nxtlist) { |
2082 | rcu_sched_qs(cpu); | 1862 | rcu_sched_qs(cpu); |
2083 | force_quiescent_state(&rcu_sched_state, 0); | 1863 | force_quiescent_state(&rcu_sched_state); |
2084 | } | 1864 | } |
2085 | if (per_cpu(rcu_bh_data, cpu).nxtlist) { | 1865 | if (per_cpu(rcu_bh_data, cpu).nxtlist) { |
2086 | rcu_bh_qs(cpu); | 1866 | rcu_bh_qs(cpu); |
2087 | force_quiescent_state(&rcu_bh_state, 0); | 1867 | force_quiescent_state(&rcu_bh_state); |
2088 | } | 1868 | } |
2089 | 1869 | ||
2090 | /* | 1870 | /* |
@@ -2112,6 +1892,88 @@ static void rcu_idle_count_callbacks_posted(void) | |||
2112 | __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); | 1892 | __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); |
2113 | } | 1893 | } |
2114 | 1894 | ||
1895 | /* | ||
1896 | * Data for flushing lazy RCU callbacks at OOM time. | ||
1897 | */ | ||
1898 | static atomic_t oom_callback_count; | ||
1899 | static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq); | ||
1900 | |||
1901 | /* | ||
1902 | * RCU OOM callback -- decrement the outstanding count and deliver the | ||
1903 | * wake-up if we are the last one. | ||
1904 | */ | ||
1905 | static void rcu_oom_callback(struct rcu_head *rhp) | ||
1906 | { | ||
1907 | if (atomic_dec_and_test(&oom_callback_count)) | ||
1908 | wake_up(&oom_callback_wq); | ||
1909 | } | ||
1910 | |||
1911 | /* | ||
1912 | * Post an rcu_oom_notify callback on the current CPU if it has at | ||
1913 | * least one lazy callback. This will unnecessarily post callbacks | ||
1914 | * to CPUs that already have a non-lazy callback at the end of their | ||
1915 | * callback list, but this is an infrequent operation, so accept some | ||
1916 | * extra overhead to keep things simple. | ||
1917 | */ | ||
1918 | static void rcu_oom_notify_cpu(void *unused) | ||
1919 | { | ||
1920 | struct rcu_state *rsp; | ||
1921 | struct rcu_data *rdp; | ||
1922 | |||
1923 | for_each_rcu_flavor(rsp) { | ||
1924 | rdp = __this_cpu_ptr(rsp->rda); | ||
1925 | if (rdp->qlen_lazy != 0) { | ||
1926 | atomic_inc(&oom_callback_count); | ||
1927 | rsp->call(&rdp->oom_head, rcu_oom_callback); | ||
1928 | } | ||
1929 | } | ||
1930 | } | ||
1931 | |||
1932 | /* | ||
1933 | * If low on memory, ensure that each CPU has a non-lazy callback. | ||
1934 | * This will wake up CPUs that have only lazy callbacks, in turn | ||
1935 | * ensuring that they free up the corresponding memory in a timely manner. | ||
1936 | * Because an uncertain amount of memory will be freed in some uncertain | ||
1937 | * timeframe, we do not claim to have freed anything. | ||
1938 | */ | ||
1939 | static int rcu_oom_notify(struct notifier_block *self, | ||
1940 | unsigned long notused, void *nfreed) | ||
1941 | { | ||
1942 | int cpu; | ||
1943 | |||
1944 | /* Wait for callbacks from earlier instance to complete. */ | ||
1945 | wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0); | ||
1946 | |||
1947 | /* | ||
1948 | * Prevent premature wakeup: ensure that all increments happen | ||
1949 | * before there is a chance of the counter reaching zero. | ||
1950 | */ | ||
1951 | atomic_set(&oom_callback_count, 1); | ||
1952 | |||
1953 | get_online_cpus(); | ||
1954 | for_each_online_cpu(cpu) { | ||
1955 | smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1); | ||
1956 | cond_resched(); | ||
1957 | } | ||
1958 | put_online_cpus(); | ||
1959 | |||
1960 | /* Unconditionally decrement: no need to wake ourselves up. */ | ||
1961 | atomic_dec(&oom_callback_count); | ||
1962 | |||
1963 | return NOTIFY_OK; | ||
1964 | } | ||
1965 | |||
1966 | static struct notifier_block rcu_oom_nb = { | ||
1967 | .notifier_call = rcu_oom_notify | ||
1968 | }; | ||
1969 | |||
1970 | static int __init rcu_register_oom_notifier(void) | ||
1971 | { | ||
1972 | register_oom_notifier(&rcu_oom_nb); | ||
1973 | return 0; | ||
1974 | } | ||
1975 | early_initcall(rcu_register_oom_notifier); | ||
1976 | |||
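The OOM notifier added above relies on a completion-counting idiom: oom_callback_count is biased to 1 before any callbacks are posted so it cannot reach zero prematurely, each rcu_oom_callback() invocation decrements it, and rcu_oom_notify() drops the bias and sleeps on a waitqueue until the count drains. A stand-alone sketch of that idiom follows; the demo_* names are invented and the completions are simulated inline rather than posted as real callbacks.

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t demo_pending = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_one_done(void)
{
        if (atomic_dec_and_test(&demo_pending))
                wake_up(&demo_wq);
}

static void demo_wait_for_all(int n)
{
        int i;

        atomic_set(&demo_pending, 1);           /* bias: no premature zero */
        for (i = 0; i < n; i++)
                atomic_inc(&demo_pending);

        /* Each unit of work calls demo_one_done() when it completes;
         * here the completions are simulated immediately. */
        for (i = 0; i < n; i++)
                demo_one_done();

        demo_one_done();                        /* drop the initial bias */
        wait_event(demo_wq, atomic_read(&demo_pending) == 0);
}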
2115 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ | 1977 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
2116 | 1978 | ||
2117 | #ifdef CONFIG_RCU_CPU_STALL_INFO | 1979 | #ifdef CONFIG_RCU_CPU_STALL_INFO |
@@ -2122,11 +1984,15 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu) | |||
2122 | { | 1984 | { |
2123 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | 1985 | struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); |
2124 | struct timer_list *tltp = &rdtp->idle_gp_timer; | 1986 | struct timer_list *tltp = &rdtp->idle_gp_timer; |
1987 | char c; | ||
2125 | 1988 | ||
2126 | sprintf(cp, "drain=%d %c timer=%lu", | 1989 | c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.'; |
2127 | rdtp->dyntick_drain, | 1990 | if (timer_pending(tltp)) |
2128 | rdtp->dyntick_holdoff == jiffies ? 'H' : '.', | 1991 | sprintf(cp, "drain=%d %c timer=%lu", |
2129 | timer_pending(tltp) ? tltp->expires - jiffies : -1); | 1992 | rdtp->dyntick_drain, c, tltp->expires - jiffies); |
1993 | else | ||
1994 | sprintf(cp, "drain=%d %c timer not pending", | ||
1995 | rdtp->dyntick_drain, c); | ||
2130 | } | 1996 | } |
2131 | 1997 | ||
2132 | #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ | 1998 | #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ |
@@ -2194,11 +2060,10 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) | |||
2194 | /* Increment ->ticks_this_gp for all flavors of RCU. */ | 2060 | /* Increment ->ticks_this_gp for all flavors of RCU. */ |
2195 | static void increment_cpu_stall_ticks(void) | 2061 | static void increment_cpu_stall_ticks(void) |
2196 | { | 2062 | { |
2197 | __get_cpu_var(rcu_sched_data).ticks_this_gp++; | 2063 | struct rcu_state *rsp; |
2198 | __get_cpu_var(rcu_bh_data).ticks_this_gp++; | 2064 | |
2199 | #ifdef CONFIG_TREE_PREEMPT_RCU | 2065 | for_each_rcu_flavor(rsp) |
2200 | __get_cpu_var(rcu_preempt_data).ticks_this_gp++; | 2066 | __this_cpu_ptr(rsp->rda)->ticks_this_gp++; |
2201 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
2202 | } | 2067 | } |
2203 | 2068 | ||
2204 | #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ | 2069 | #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ |
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index abffb486e94e..693513bc50e6 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -51,8 +51,8 @@ static int show_rcubarrier(struct seq_file *m, void *unused) | |||
51 | struct rcu_state *rsp; | 51 | struct rcu_state *rsp; |
52 | 52 | ||
53 | for_each_rcu_flavor(rsp) | 53 | for_each_rcu_flavor(rsp) |
54 | seq_printf(m, "%s: %c bcc: %d nbd: %lu\n", | 54 | seq_printf(m, "%s: bcc: %d nbd: %lu\n", |
55 | rsp->name, rsp->rcu_barrier_in_progress ? 'B' : '.', | 55 | rsp->name, |
56 | atomic_read(&rsp->barrier_cpu_count), | 56 | atomic_read(&rsp->barrier_cpu_count), |
57 | rsp->n_barrier_done); | 57 | rsp->n_barrier_done); |
58 | return 0; | 58 | return 0; |
@@ -86,12 +86,11 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
86 | { | 86 | { |
87 | if (!rdp->beenonline) | 87 | if (!rdp->beenonline) |
88 | return; | 88 | return; |
89 | seq_printf(m, "%3d%cc=%lu g=%lu pq=%d pgp=%lu qp=%d", | 89 | seq_printf(m, "%3d%cc=%lu g=%lu pq=%d qp=%d", |
90 | rdp->cpu, | 90 | rdp->cpu, |
91 | cpu_is_offline(rdp->cpu) ? '!' : ' ', | 91 | cpu_is_offline(rdp->cpu) ? '!' : ' ', |
92 | rdp->completed, rdp->gpnum, | 92 | rdp->completed, rdp->gpnum, |
93 | rdp->passed_quiesce, rdp->passed_quiesce_gpnum, | 93 | rdp->passed_quiesce, rdp->qs_pending); |
94 | rdp->qs_pending); | ||
95 | seq_printf(m, " dt=%d/%llx/%d df=%lu", | 94 | seq_printf(m, " dt=%d/%llx/%d df=%lu", |
96 | atomic_read(&rdp->dynticks->dynticks), | 95 | atomic_read(&rdp->dynticks->dynticks), |
97 | rdp->dynticks->dynticks_nesting, | 96 | rdp->dynticks->dynticks_nesting, |
@@ -108,11 +107,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
108 | rdp->nxttail[RCU_WAIT_TAIL]], | 107 | rdp->nxttail[RCU_WAIT_TAIL]], |
109 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); | 108 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); |
110 | #ifdef CONFIG_RCU_BOOST | 109 | #ifdef CONFIG_RCU_BOOST |
111 | seq_printf(m, " kt=%d/%c/%d ktl=%x", | 110 | seq_printf(m, " kt=%d/%c ktl=%x", |
112 | per_cpu(rcu_cpu_has_work, rdp->cpu), | 111 | per_cpu(rcu_cpu_has_work, rdp->cpu), |
113 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | 112 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, |
114 | rdp->cpu)), | 113 | rdp->cpu)), |
115 | per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), | ||
116 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); | 114 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); |
117 | #endif /* #ifdef CONFIG_RCU_BOOST */ | 115 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
118 | seq_printf(m, " b=%ld", rdp->blimit); | 116 | seq_printf(m, " b=%ld", rdp->blimit); |
@@ -150,12 +148,11 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
150 | { | 148 | { |
151 | if (!rdp->beenonline) | 149 | if (!rdp->beenonline) |
152 | return; | 150 | return; |
153 | seq_printf(m, "%d,%s,%lu,%lu,%d,%lu,%d", | 151 | seq_printf(m, "%d,%s,%lu,%lu,%d,%d", |
154 | rdp->cpu, | 152 | rdp->cpu, |
155 | cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"", | 153 | cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"", |
156 | rdp->completed, rdp->gpnum, | 154 | rdp->completed, rdp->gpnum, |
157 | rdp->passed_quiesce, rdp->passed_quiesce_gpnum, | 155 | rdp->passed_quiesce, rdp->qs_pending); |
158 | rdp->qs_pending); | ||
159 | seq_printf(m, ",%d,%llx,%d,%lu", | 156 | seq_printf(m, ",%d,%llx,%d,%lu", |
160 | atomic_read(&rdp->dynticks->dynticks), | 157 | atomic_read(&rdp->dynticks->dynticks), |
161 | rdp->dynticks->dynticks_nesting, | 158 | rdp->dynticks->dynticks_nesting, |
@@ -186,7 +183,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) | |||
186 | int cpu; | 183 | int cpu; |
187 | struct rcu_state *rsp; | 184 | struct rcu_state *rsp; |
188 | 185 | ||
189 | seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\","); | 186 | seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pq\","); |
190 | seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); | 187 | seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); |
191 | seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\""); | 188 | seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\""); |
192 | #ifdef CONFIG_RCU_BOOST | 189 | #ifdef CONFIG_RCU_BOOST |
@@ -386,10 +383,9 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) | |||
386 | rdp->n_rp_report_qs, | 383 | rdp->n_rp_report_qs, |
387 | rdp->n_rp_cb_ready, | 384 | rdp->n_rp_cb_ready, |
388 | rdp->n_rp_cpu_needs_gp); | 385 | rdp->n_rp_cpu_needs_gp); |
389 | seq_printf(m, "gpc=%ld gps=%ld nf=%ld nn=%ld\n", | 386 | seq_printf(m, "gpc=%ld gps=%ld nn=%ld\n", |
390 | rdp->n_rp_gp_completed, | 387 | rdp->n_rp_gp_completed, |
391 | rdp->n_rp_gp_started, | 388 | rdp->n_rp_gp_started, |
392 | rdp->n_rp_need_fqs, | ||
393 | rdp->n_rp_need_nothing); | 389 | rdp->n_rp_need_nothing); |
394 | } | 390 | } |
395 | 391 | ||
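The rcutree_trace.c hunks above trim retired fields from several seq_file show routines. For context, such a routine is typically wired up through single_open() roughly as sketched below; demo_show(), its output, and the file_operations name are invented for illustration rather than taken from the patch.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *unused)
{
        seq_printf(m, "bcc: %d nbd: %lu\n", 0, 0UL);
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .open           = demo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};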
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 173ea52f3af0..f06d249e103b 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile | |||
@@ -11,7 +11,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) | |||
11 | CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer | 11 | CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer |
12 | endif | 12 | endif |
13 | 13 | ||
14 | obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o | 14 | obj-y += core.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o |
15 | obj-$(CONFIG_SMP) += cpupri.o | 15 | obj-$(CONFIG_SMP) += cpupri.o |
16 | obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o | 16 | obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o |
17 | obj-$(CONFIG_SCHEDSTATS) += stats.o | 17 | obj-$(CONFIG_SCHEDSTATS) += stats.o |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fbf1fd098dc6..c17747236438 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -740,126 +740,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) | |||
740 | dequeue_task(rq, p, flags); | 740 | dequeue_task(rq, p, flags); |
741 | } | 741 | } |
742 | 742 | ||
743 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
744 | |||
745 | /* | ||
746 | * There are no locks covering percpu hardirq/softirq time. | ||
747 | * They are only modified in account_system_vtime, on corresponding CPU | ||
748 | * with interrupts disabled. So, writes are safe. | ||
749 | * They are read and saved off onto struct rq in update_rq_clock(). | ||
750 | * This may result in another CPU reading this CPU's irq time and can | ||
751 | * race with irq/account_system_vtime on this CPU. We would either get the | ||
752 | * old or the new value, with a side effect of accounting a slice of irq time | ||
753 | * to the wrong task when an irq is in progress while we read rq->clock. That | ||
754 | * is a worthy compromise in place of having locks on each irq in account_system_time. | ||
755 | */ | ||
756 | static DEFINE_PER_CPU(u64, cpu_hardirq_time); | ||
757 | static DEFINE_PER_CPU(u64, cpu_softirq_time); | ||
758 | |||
759 | static DEFINE_PER_CPU(u64, irq_start_time); | ||
760 | static int sched_clock_irqtime; | ||
761 | |||
762 | void enable_sched_clock_irqtime(void) | ||
763 | { | ||
764 | sched_clock_irqtime = 1; | ||
765 | } | ||
766 | |||
767 | void disable_sched_clock_irqtime(void) | ||
768 | { | ||
769 | sched_clock_irqtime = 0; | ||
770 | } | ||
771 | |||
772 | #ifndef CONFIG_64BIT | ||
773 | static DEFINE_PER_CPU(seqcount_t, irq_time_seq); | ||
774 | |||
775 | static inline void irq_time_write_begin(void) | ||
776 | { | ||
777 | __this_cpu_inc(irq_time_seq.sequence); | ||
778 | smp_wmb(); | ||
779 | } | ||
780 | |||
781 | static inline void irq_time_write_end(void) | ||
782 | { | ||
783 | smp_wmb(); | ||
784 | __this_cpu_inc(irq_time_seq.sequence); | ||
785 | } | ||
786 | |||
787 | static inline u64 irq_time_read(int cpu) | ||
788 | { | ||
789 | u64 irq_time; | ||
790 | unsigned seq; | ||
791 | |||
792 | do { | ||
793 | seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); | ||
794 | irq_time = per_cpu(cpu_softirq_time, cpu) + | ||
795 | per_cpu(cpu_hardirq_time, cpu); | ||
796 | } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); | ||
797 | |||
798 | return irq_time; | ||
799 | } | ||
800 | #else /* CONFIG_64BIT */ | ||
801 | static inline void irq_time_write_begin(void) | ||
802 | { | ||
803 | } | ||
804 | |||
805 | static inline void irq_time_write_end(void) | ||
806 | { | ||
807 | } | ||
808 | |||
809 | static inline u64 irq_time_read(int cpu) | ||
810 | { | ||
811 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); | ||
812 | } | ||
813 | #endif /* CONFIG_64BIT */ | ||
814 | |||
815 | /* | ||
816 | * Called before incrementing preempt_count on {soft,}irq_enter | ||
817 | * and before decrementing preempt_count on {soft,}irq_exit. | ||
818 | */ | ||
819 | void account_system_vtime(struct task_struct *curr) | ||
820 | { | ||
821 | unsigned long flags; | ||
822 | s64 delta; | ||
823 | int cpu; | ||
824 | |||
825 | if (!sched_clock_irqtime) | ||
826 | return; | ||
827 | |||
828 | local_irq_save(flags); | ||
829 | |||
830 | cpu = smp_processor_id(); | ||
831 | delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); | ||
832 | __this_cpu_add(irq_start_time, delta); | ||
833 | |||
834 | irq_time_write_begin(); | ||
835 | /* | ||
836 | * We do not account for softirq time from ksoftirqd here. | ||
837 | * We want to continue accounting softirq time to ksoftirqd thread | ||
838 | * in that case, so as not to confuse scheduler with a special task | ||
839 | * that do not consume any time, but still wants to run. | ||
840 | */ | ||
841 | if (hardirq_count()) | ||
842 | __this_cpu_add(cpu_hardirq_time, delta); | ||
843 | else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) | ||
844 | __this_cpu_add(cpu_softirq_time, delta); | ||
845 | |||
846 | irq_time_write_end(); | ||
847 | local_irq_restore(flags); | ||
848 | } | ||
849 | EXPORT_SYMBOL_GPL(account_system_vtime); | ||
850 | |||
851 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
852 | |||
853 | #ifdef CONFIG_PARAVIRT | ||
854 | static inline u64 steal_ticks(u64 steal) | ||
855 | { | ||
856 | if (unlikely(steal > NSEC_PER_SEC)) | ||
857 | return div_u64(steal, TICK_NSEC); | ||
858 | |||
859 | return __iter_div_u64_rem(steal, TICK_NSEC, &steal); | ||
860 | } | ||
861 | #endif | ||
862 | |||
863 | static void update_rq_clock_task(struct rq *rq, s64 delta) | 743 | static void update_rq_clock_task(struct rq *rq, s64 delta) |
864 | { | 744 | { |
865 | /* | 745 | /* |
@@ -920,43 +800,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) | |||
920 | #endif | 800 | #endif |
921 | } | 801 | } |
922 | 802 | ||
923 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
924 | static int irqtime_account_hi_update(void) | ||
925 | { | ||
926 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
927 | unsigned long flags; | ||
928 | u64 latest_ns; | ||
929 | int ret = 0; | ||
930 | |||
931 | local_irq_save(flags); | ||
932 | latest_ns = this_cpu_read(cpu_hardirq_time); | ||
933 | if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ]) | ||
934 | ret = 1; | ||
935 | local_irq_restore(flags); | ||
936 | return ret; | ||
937 | } | ||
938 | |||
939 | static int irqtime_account_si_update(void) | ||
940 | { | ||
941 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
942 | unsigned long flags; | ||
943 | u64 latest_ns; | ||
944 | int ret = 0; | ||
945 | |||
946 | local_irq_save(flags); | ||
947 | latest_ns = this_cpu_read(cpu_softirq_time); | ||
948 | if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ]) | ||
949 | ret = 1; | ||
950 | local_irq_restore(flags); | ||
951 | return ret; | ||
952 | } | ||
953 | |||
954 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
955 | |||
956 | #define sched_clock_irqtime (0) | ||
957 | |||
958 | #endif | ||
959 | |||
960 | void sched_set_stop_task(int cpu, struct task_struct *stop) | 803 | void sched_set_stop_task(int cpu, struct task_struct *stop) |
961 | { | 804 | { |
962 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; | 805 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
@@ -1518,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu) | |||
1518 | smp_send_reschedule(cpu); | 1361 | smp_send_reschedule(cpu); |
1519 | } | 1362 | } |
1520 | 1363 | ||
1521 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1522 | static int ttwu_activate_remote(struct task_struct *p, int wake_flags) | ||
1523 | { | ||
1524 | struct rq *rq; | ||
1525 | int ret = 0; | ||
1526 | |||
1527 | rq = __task_rq_lock(p); | ||
1528 | if (p->on_cpu) { | ||
1529 | ttwu_activate(rq, p, ENQUEUE_WAKEUP); | ||
1530 | ttwu_do_wakeup(rq, p, wake_flags); | ||
1531 | ret = 1; | ||
1532 | } | ||
1533 | __task_rq_unlock(rq); | ||
1534 | |||
1535 | return ret; | ||
1536 | |||
1537 | } | ||
1538 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
1539 | |||
1540 | bool cpus_share_cache(int this_cpu, int that_cpu) | 1364 | bool cpus_share_cache(int this_cpu, int that_cpu) |
1541 | { | 1365 | { |
1542 | return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); | 1366 | return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); |
@@ -1597,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
1597 | * If the owning (remote) cpu is still in the middle of schedule() with | 1421 | * If the owning (remote) cpu is still in the middle of schedule() with |
1598 | * this task as prev, wait until it's done referencing the task. | 1422 | * this task as prev, wait until it's done referencing the task. |
1599 | */ | 1423 | */ |
1600 | while (p->on_cpu) { | 1424 | while (p->on_cpu) |
1601 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1602 | /* | ||
1603 | * In case the architecture enables interrupts in | ||
1604 | * context_switch(), we cannot busy wait, since that | ||
1605 | * would lead to deadlocks when an interrupt hits and | ||
1606 | * tries to wake up @prev. So bail and do a complete | ||
1607 | * remote wakeup. | ||
1608 | */ | ||
1609 | if (ttwu_activate_remote(p, wake_flags)) | ||
1610 | goto stat; | ||
1611 | #else | ||
1612 | cpu_relax(); | 1425 | cpu_relax(); |
1613 | #endif | ||
1614 | } | ||
1615 | /* | 1426 | /* |
1616 | * Pairs with the smp_wmb() in finish_lock_switch(). | 1427 | * Pairs with the smp_wmb() in finish_lock_switch(). |
1617 | */ | 1428 | */ |
@@ -1953,14 +1764,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
1953 | * Manfred Spraul <manfred@colorfullife.com> | 1764 | * Manfred Spraul <manfred@colorfullife.com> |
1954 | */ | 1765 | */ |
1955 | prev_state = prev->state; | 1766 | prev_state = prev->state; |
1767 | vtime_task_switch(prev); | ||
1956 | finish_arch_switch(prev); | 1768 | finish_arch_switch(prev); |
1957 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1958 | local_irq_disable(); | ||
1959 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
1960 | perf_event_task_sched_in(prev, current); | 1769 | perf_event_task_sched_in(prev, current); |
1961 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1962 | local_irq_enable(); | ||
1963 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
1964 | finish_lock_switch(rq, prev); | 1770 | finish_lock_switch(rq, prev); |
1965 | finish_arch_post_lock_switch(); | 1771 | finish_arch_post_lock_switch(); |
1966 | 1772 | ||
@@ -2081,6 +1887,7 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
2081 | #endif | 1887 | #endif |
2082 | 1888 | ||
2083 | /* Here we just switch the register state and the stack. */ | 1889 | /* Here we just switch the register state and the stack. */ |
1890 | rcu_switch(prev, next); | ||
2084 | switch_to(prev, next, prev); | 1891 | switch_to(prev, next, prev); |
2085 | 1892 | ||
2086 | barrier(); | 1893 | barrier(); |
@@ -2809,404 +2616,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
2809 | return ns; | 2616 | return ns; |
2810 | } | 2617 | } |
2811 | 2618 | ||
2812 | #ifdef CONFIG_CGROUP_CPUACCT | ||
2813 | struct cgroup_subsys cpuacct_subsys; | ||
2814 | struct cpuacct root_cpuacct; | ||
2815 | #endif | ||
2816 | |||
2817 | static inline void task_group_account_field(struct task_struct *p, int index, | ||
2818 | u64 tmp) | ||
2819 | { | ||
2820 | #ifdef CONFIG_CGROUP_CPUACCT | ||
2821 | struct kernel_cpustat *kcpustat; | ||
2822 | struct cpuacct *ca; | ||
2823 | #endif | ||
2824 | /* | ||
2825 | * Since all updates are sure to touch the root cgroup, we | ||
2826 | * get ourselves ahead and touch it first. If the root cgroup | ||
2827 | * is the only cgroup, then nothing else should be necessary. | ||
2828 | * | ||
2829 | */ | ||
2830 | __get_cpu_var(kernel_cpustat).cpustat[index] += tmp; | ||
2831 | |||
2832 | #ifdef CONFIG_CGROUP_CPUACCT | ||
2833 | if (unlikely(!cpuacct_subsys.active)) | ||
2834 | return; | ||
2835 | |||
2836 | rcu_read_lock(); | ||
2837 | ca = task_ca(p); | ||
2838 | while (ca && (ca != &root_cpuacct)) { | ||
2839 | kcpustat = this_cpu_ptr(ca->cpustat); | ||
2840 | kcpustat->cpustat[index] += tmp; | ||
2841 | ca = parent_ca(ca); | ||
2842 | } | ||
2843 | rcu_read_unlock(); | ||
2844 | #endif | ||
2845 | } | ||
2846 | |||
2847 | |||
2848 | /* | ||
2849 | * Account user cpu time to a process. | ||
2850 | * @p: the process that the cpu time gets accounted to | ||
2851 | * @cputime: the cpu time spent in user space since the last update | ||
2852 | * @cputime_scaled: cputime scaled by cpu frequency | ||
2853 | */ | ||
2854 | void account_user_time(struct task_struct *p, cputime_t cputime, | ||
2855 | cputime_t cputime_scaled) | ||
2856 | { | ||
2857 | int index; | ||
2858 | |||
2859 | /* Add user time to process. */ | ||
2860 | p->utime += cputime; | ||
2861 | p->utimescaled += cputime_scaled; | ||
2862 | account_group_user_time(p, cputime); | ||
2863 | |||
2864 | index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; | ||
2865 | |||
2866 | /* Add user time to cpustat. */ | ||
2867 | task_group_account_field(p, index, (__force u64) cputime); | ||
2868 | |||
2869 | /* Account for user time used */ | ||
2870 | acct_update_integrals(p); | ||
2871 | } | ||
2872 | |||
2873 | /* | ||
2874 | * Account guest cpu time to a process. | ||
2875 | * @p: the process that the cpu time gets accounted to | ||
2876 | * @cputime: the cpu time spent in virtual machine since the last update | ||
2877 | * @cputime_scaled: cputime scaled by cpu frequency | ||
2878 | */ | ||
2879 | static void account_guest_time(struct task_struct *p, cputime_t cputime, | ||
2880 | cputime_t cputime_scaled) | ||
2881 | { | ||
2882 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
2883 | |||
2884 | /* Add guest time to process. */ | ||
2885 | p->utime += cputime; | ||
2886 | p->utimescaled += cputime_scaled; | ||
2887 | account_group_user_time(p, cputime); | ||
2888 | p->gtime += cputime; | ||
2889 | |||
2890 | /* Add guest time to cpustat. */ | ||
2891 | if (TASK_NICE(p) > 0) { | ||
2892 | cpustat[CPUTIME_NICE] += (__force u64) cputime; | ||
2893 | cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; | ||
2894 | } else { | ||
2895 | cpustat[CPUTIME_USER] += (__force u64) cputime; | ||
2896 | cpustat[CPUTIME_GUEST] += (__force u64) cputime; | ||
2897 | } | ||
2898 | } | ||
2899 | |||
2900 | /* | ||
2901 | * Account system cpu time to a process and desired cpustat field | ||
2902 | * @p: the process that the cpu time gets accounted to | ||
2903 | * @cputime: the cpu time spent in kernel space since the last update | ||
2904 | * @cputime_scaled: cputime scaled by cpu frequency | ||
2905 | * @target_cputime64: pointer to cpustat field that has to be updated | ||
2906 | */ | ||
2907 | static inline | ||
2908 | void __account_system_time(struct task_struct *p, cputime_t cputime, | ||
2909 | cputime_t cputime_scaled, int index) | ||
2910 | { | ||
2911 | /* Add system time to process. */ | ||
2912 | p->stime += cputime; | ||
2913 | p->stimescaled += cputime_scaled; | ||
2914 | account_group_system_time(p, cputime); | ||
2915 | |||
2916 | /* Add system time to cpustat. */ | ||
2917 | task_group_account_field(p, index, (__force u64) cputime); | ||
2918 | |||
2919 | /* Account for system time used */ | ||
2920 | acct_update_integrals(p); | ||
2921 | } | ||
2922 | |||
2923 | /* | ||
2924 | * Account system cpu time to a process. | ||
2925 | * @p: the process that the cpu time gets accounted to | ||
2926 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
2927 | * @cputime: the cpu time spent in kernel space since the last update | ||
2928 | * @cputime_scaled: cputime scaled by cpu frequency | ||
2929 | */ | ||
2930 | void account_system_time(struct task_struct *p, int hardirq_offset, | ||
2931 | cputime_t cputime, cputime_t cputime_scaled) | ||
2932 | { | ||
2933 | int index; | ||
2934 | |||
2935 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | ||
2936 | account_guest_time(p, cputime, cputime_scaled); | ||
2937 | return; | ||
2938 | } | ||
2939 | |||
2940 | if (hardirq_count() - hardirq_offset) | ||
2941 | index = CPUTIME_IRQ; | ||
2942 | else if (in_serving_softirq()) | ||
2943 | index = CPUTIME_SOFTIRQ; | ||
2944 | else | ||
2945 | index = CPUTIME_SYSTEM; | ||
2946 | |||
2947 | __account_system_time(p, cputime, cputime_scaled, index); | ||
2948 | } | ||
2949 | |||
2950 | /* | ||
2951 | * Account for involuntary wait time. | ||
2952 | * @cputime: the cpu time spent in involuntary wait | ||
2953 | */ | ||
2954 | void account_steal_time(cputime_t cputime) | ||
2955 | { | ||
2956 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
2957 | |||
2958 | cpustat[CPUTIME_STEAL] += (__force u64) cputime; | ||
2959 | } | ||
2960 | |||
2961 | /* | ||
2962 | * Account for idle time. | ||
2963 | * @cputime: the cpu time spent in idle wait | ||
2964 | */ | ||
2965 | void account_idle_time(cputime_t cputime) | ||
2966 | { | ||
2967 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
2968 | struct rq *rq = this_rq(); | ||
2969 | |||
2970 | if (atomic_read(&rq->nr_iowait) > 0) | ||
2971 | cpustat[CPUTIME_IOWAIT] += (__force u64) cputime; | ||
2972 | else | ||
2973 | cpustat[CPUTIME_IDLE] += (__force u64) cputime; | ||
2974 | } | ||
2975 | |||
2976 | static __always_inline bool steal_account_process_tick(void) | ||
2977 | { | ||
2978 | #ifdef CONFIG_PARAVIRT | ||
2979 | if (static_key_false(¶virt_steal_enabled)) { | ||
2980 | u64 steal, st = 0; | ||
2981 | |||
2982 | steal = paravirt_steal_clock(smp_processor_id()); | ||
2983 | steal -= this_rq()->prev_steal_time; | ||
2984 | |||
2985 | st = steal_ticks(steal); | ||
2986 | this_rq()->prev_steal_time += st * TICK_NSEC; | ||
2987 | |||
2988 | account_steal_time(st); | ||
2989 | return st; | ||
2990 | } | ||
2991 | #endif | ||
2992 | return false; | ||
2993 | } | ||
2994 | |||
2995 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
2996 | |||
2997 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
2998 | /* | ||
2999 | * Account a tick to a process and cpustat | ||
3000 | * @p: the process that the cpu time gets accounted to | ||
3001 | * @user_tick: is the tick from userspace | ||
3002 | * @rq: the pointer to rq | ||
3003 | * | ||
3004 | * Tick demultiplexing follows the order | ||
3005 | * - pending hardirq update | ||
3006 | * - pending softirq update | ||
3007 | * - user_time | ||
3008 | * - idle_time | ||
3009 | * - system time | ||
3010 | * - check for guest_time | ||
3011 | * - else account as system_time | ||
3012 | * | ||
3013 | * Check for hardirq is done both for system and user time as there is | ||
3014 | * no timer going off while we are on hardirq and hence we may never get an | ||
3015 | * opportunity to update it solely in system time. | ||
3016 | * p->stime and friends are only updated on system time and not on irq | ||
3017 | * softirq as those do not count in task exec_runtime any more. | ||
3018 | */ | ||
3019 | static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | ||
3020 | struct rq *rq) | ||
3021 | { | ||
3022 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); | ||
3023 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
3024 | |||
3025 | if (steal_account_process_tick()) | ||
3026 | return; | ||
3027 | |||
3028 | if (irqtime_account_hi_update()) { | ||
3029 | cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; | ||
3030 | } else if (irqtime_account_si_update()) { | ||
3031 | cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; | ||
3032 | } else if (this_cpu_ksoftirqd() == p) { | ||
3033 | /* | ||
3034 | * ksoftirqd time do not get accounted in cpu_softirq_time. | ||
3035 | * So, we have to handle it separately here. | ||
3036 | * Also, p->stime needs to be updated for ksoftirqd. | ||
3037 | */ | ||
3038 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, | ||
3039 | CPUTIME_SOFTIRQ); | ||
3040 | } else if (user_tick) { | ||
3041 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); | ||
3042 | } else if (p == rq->idle) { | ||
3043 | account_idle_time(cputime_one_jiffy); | ||
3044 | } else if (p->flags & PF_VCPU) { /* System time or guest time */ | ||
3045 | account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); | ||
3046 | } else { | ||
3047 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, | ||
3048 | CPUTIME_SYSTEM); | ||
3049 | } | ||
3050 | } | ||
3051 | |||
3052 | static void irqtime_account_idle_ticks(int ticks) | ||
3053 | { | ||
3054 | int i; | ||
3055 | struct rq *rq = this_rq(); | ||
3056 | |||
3057 | for (i = 0; i < ticks; i++) | ||
3058 | irqtime_account_process_tick(current, 0, rq); | ||
3059 | } | ||
3060 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
3061 | static void irqtime_account_idle_ticks(int ticks) {} | ||
3062 | static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | ||
3063 | struct rq *rq) {} | ||
3064 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
3065 | |||
3066 | /* | ||
3067 | * Account a single tick of cpu time. | ||
3068 | * @p: the process that the cpu time gets accounted to | ||
3069 | * @user_tick: indicates if the tick is a user or a system tick | ||
3070 | */ | ||
3071 | void account_process_tick(struct task_struct *p, int user_tick) | ||
3072 | { | ||
3073 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); | ||
3074 | struct rq *rq = this_rq(); | ||
3075 | |||
3076 | if (sched_clock_irqtime) { | ||
3077 | irqtime_account_process_tick(p, user_tick, rq); | ||
3078 | return; | ||
3079 | } | ||
3080 | |||
3081 | if (steal_account_process_tick()) | ||
3082 | return; | ||
3083 | |||
3084 | if (user_tick) | ||
3085 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); | ||
3086 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) | ||
3087 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, | ||
3088 | one_jiffy_scaled); | ||
3089 | else | ||
3090 | account_idle_time(cputime_one_jiffy); | ||
3091 | } | ||
3092 | |||
3093 | /* | ||
3094 | * Account multiple ticks of steal time. | ||
3095 | * @p: the process from which the cpu time has been stolen | ||
3096 | * @ticks: number of stolen ticks | ||
3097 | */ | ||
3098 | void account_steal_ticks(unsigned long ticks) | ||
3099 | { | ||
3100 | account_steal_time(jiffies_to_cputime(ticks)); | ||
3101 | } | ||
3102 | |||
3103 | /* | ||
3104 | * Account multiple ticks of idle time. | ||
3105 | * @ticks: number of stolen ticks | ||
3106 | */ | ||
3107 | void account_idle_ticks(unsigned long ticks) | ||
3108 | { | ||
3109 | |||
3110 | if (sched_clock_irqtime) { | ||
3111 | irqtime_account_idle_ticks(ticks); | ||
3112 | return; | ||
3113 | } | ||
3114 | |||
3115 | account_idle_time(jiffies_to_cputime(ticks)); | ||
3116 | } | ||
3117 | |||
3118 | #endif | ||
3119 | |||
3120 | /* | ||
3121 | * Use precise platform statistics if available: | ||
3122 | */ | ||
3123 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
3124 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
3125 | { | ||
3126 | *ut = p->utime; | ||
3127 | *st = p->stime; | ||
3128 | } | ||
3129 | |||
3130 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
3131 | { | ||
3132 | struct task_cputime cputime; | ||
3133 | |||
3134 | thread_group_cputime(p, &cputime); | ||
3135 | |||
3136 | *ut = cputime.utime; | ||
3137 | *st = cputime.stime; | ||
3138 | } | ||
3139 | #else | ||
3140 | |||
3141 | #ifndef nsecs_to_cputime | ||
3142 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) | ||
3143 | #endif | ||
3144 | |||
3145 | static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) | ||
3146 | { | ||
3147 | u64 temp = (__force u64) rtime; | ||
3148 | |||
3149 | temp *= (__force u64) utime; | ||
3150 | |||
3151 | if (sizeof(cputime_t) == 4) | ||
3152 | temp = div_u64(temp, (__force u32) total); | ||
3153 | else | ||
3154 | temp = div64_u64(temp, (__force u64) total); | ||
3155 | |||
3156 | return (__force cputime_t) temp; | ||
3157 | } | ||
3158 | |||
3159 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
3160 | { | ||
3161 | cputime_t rtime, utime = p->utime, total = utime + p->stime; | ||
3162 | |||
3163 | /* | ||
3164 | * Use CFS's precise accounting: | ||
3165 | */ | ||
3166 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); | ||
3167 | |||
3168 | if (total) | ||
3169 | utime = scale_utime(utime, rtime, total); | ||
3170 | else | ||
3171 | utime = rtime; | ||
3172 | |||
3173 | /* | ||
3174 | * Compare with previous values, to keep monotonicity: | ||
3175 | */ | ||
3176 | p->prev_utime = max(p->prev_utime, utime); | ||
3177 | p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); | ||
3178 | |||
3179 | *ut = p->prev_utime; | ||
3180 | *st = p->prev_stime; | ||
3181 | } | ||
3182 | |||
3183 | /* | ||
3184 | * Must be called with siglock held. | ||
3185 | */ | ||
3186 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
3187 | { | ||
3188 | struct signal_struct *sig = p->signal; | ||
3189 | struct task_cputime cputime; | ||
3190 | cputime_t rtime, utime, total; | ||
3191 | |||
3192 | thread_group_cputime(p, &cputime); | ||
3193 | |||
3194 | total = cputime.utime + cputime.stime; | ||
3195 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); | ||
3196 | |||
3197 | if (total) | ||
3198 | utime = scale_utime(cputime.utime, rtime, total); | ||
3199 | else | ||
3200 | utime = rtime; | ||
3201 | |||
3202 | sig->prev_utime = max(sig->prev_utime, utime); | ||
3203 | sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); | ||
3204 | |||
3205 | *ut = sig->prev_utime; | ||
3206 | *st = sig->prev_stime; | ||
3207 | } | ||
3208 | #endif | ||
3209 | |||
3210 | /* | 2619 | /* |
3211 | * This function gets called by the timer code, with HZ frequency. | 2620 | * This function gets called by the timer code, with HZ frequency. |
3212 | * We call it with interrupts disabled. | 2621 | * We call it with interrupts disabled. |
@@ -3367,6 +2776,40 @@ pick_next_task(struct rq *rq) | |||
3367 | 2776 | ||
3368 | /* | 2777 | /* |
3369 | * __schedule() is the main scheduler function. | 2778 | * __schedule() is the main scheduler function. |
2779 | * | ||
2780 | * The main means of driving the scheduler and thus entering this function are: | ||
2781 | * | ||
2782 | * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. | ||
2783 | * | ||
2784 | * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return | ||
2785 | * paths. For example, see arch/x86/entry_64.S. | ||
2786 | * | ||
2787 | * To drive preemption between tasks, the scheduler sets the flag in timer | ||
2788 | * interrupt handler scheduler_tick(). | ||
2789 | * | ||
2790 | * 3. Wakeups don't really cause entry into schedule(). They add a | ||
2791 | * task to the run-queue and that's it. | ||
2792 | * | ||
2793 | * Now, if the new task added to the run-queue preempts the current | ||
2794 | * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets | ||
2795 | * called on the nearest possible occasion: | ||
2796 | * | ||
2797 | * - If the kernel is preemptible (CONFIG_PREEMPT=y): | ||
2798 | * | ||
2799 | * - in syscall or exception context, at the next outermost | ||
2800 | * preempt_enable(). (this might be as soon as the wake_up()'s | ||
2801 | * spin_unlock()!) | ||
2802 | * | ||
2803 | * - in IRQ context, return from interrupt-handler to | ||
2804 | * preemptible context | ||
2805 | * | ||
2806 | * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) | ||
2807 | * then at the next: | ||
2808 | * | ||
2809 | * - cond_resched() call | ||
2810 | * - explicit schedule() call | ||
2811 | * - return from syscall or exception to user-space | ||
2812 | * - return from interrupt-handler to user-space | ||
3370 | */ | 2813 | */ |
3371 | static void __sched __schedule(void) | 2814 | static void __sched __schedule(void) |
3372 | { | 2815 | { |
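As a minimal illustration of entry point 1 above (explicit blocking), and assuming a purely hypothetical driver type with a wait_queue_head_t 'wq' and an int 'data_ready' flag that the device's interrupt path sets before waking the queue, a sleeper that eventually reaches __schedule() typically looks like the sketch below; under !CONFIG_PREEMPT, a cond_resched() placed in a long kernel loop covers the other common entry path listed above.

	#include <linux/errno.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	/* 'struct example_dev', 'wq' and 'data_ready' are made up for illustration. */
	static int example_wait_for_data(struct example_dev *dev)
	{
		DEFINE_WAIT(wait);
		int ret = 0;

		for (;;) {
			prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
			if (dev->data_ready)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();		/* case 1: explicit blocking */
		}
		finish_wait(&dev->wq, &wait);

		return ret;
	}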
@@ -3468,6 +2911,21 @@ asmlinkage void __sched schedule(void) | |||
3468 | } | 2911 | } |
3469 | EXPORT_SYMBOL(schedule); | 2912 | EXPORT_SYMBOL(schedule); |
3470 | 2913 | ||
2914 | #ifdef CONFIG_RCU_USER_QS | ||
2915 | asmlinkage void __sched schedule_user(void) | ||
2916 | { | ||
2917 | /* | ||
2918 | * If we come here after a random call to set_need_resched(), | ||
2919 | * or we have been woken up remotely but the IPI has not yet arrived, | ||
2920 | * we haven't yet exited the RCU idle mode. Do it here manually until | ||
2921 | * we find a better solution. | ||
2922 | */ | ||
2923 | rcu_user_exit(); | ||
2924 | schedule(); | ||
2925 | rcu_user_enter(); | ||
2926 | } | ||
2927 | #endif | ||
2928 | |||
3471 | /** | 2929 | /** |
3472 | * schedule_preempt_disabled - called with preemption disabled | 2930 | * schedule_preempt_disabled - called with preemption disabled |
3473 | * | 2931 | * |
@@ -3569,6 +3027,7 @@ asmlinkage void __sched preempt_schedule_irq(void) | |||
3569 | /* Catch callers which need to be fixed */ | 3027 | /* Catch callers which need to be fixed */ |
3570 | BUG_ON(ti->preempt_count || !irqs_disabled()); | 3028 | BUG_ON(ti->preempt_count || !irqs_disabled()); |
3571 | 3029 | ||
3030 | rcu_user_exit(); | ||
3572 | do { | 3031 | do { |
3573 | add_preempt_count(PREEMPT_ACTIVE); | 3032 | add_preempt_count(PREEMPT_ACTIVE); |
3574 | local_irq_enable(); | 3033 | local_irq_enable(); |
@@ -4868,13 +4327,6 @@ again: | |||
4868 | */ | 4327 | */ |
4869 | if (preempt && rq != p_rq) | 4328 | if (preempt && rq != p_rq) |
4870 | resched_task(p_rq->curr); | 4329 | resched_task(p_rq->curr); |
4871 | } else { | ||
4872 | /* | ||
4873 | * We might have set it in task_yield_fair(), but are | ||
4874 | * not going to schedule(), so don't want to skip | ||
4875 | * the next update. | ||
4876 | */ | ||
4877 | rq->skip_clock_update = 0; | ||
4878 | } | 4330 | } |
4879 | 4331 | ||
4880 | out: | 4332 | out: |
@@ -5304,27 +4756,17 @@ void idle_task_exit(void) | |||
5304 | } | 4756 | } |
5305 | 4757 | ||
5306 | /* | 4758 | /* |
5307 | * While a dead CPU has no uninterruptible tasks queued at this point, | 4759 | * Since this CPU is going 'away' for a while, fold any nr_active delta |
5308 | * it might still have a nonzero ->nr_uninterruptible counter, because | 4760 | * we might have. Assumes we're called after migrate_tasks() so that the |
5309 | * for performance reasons the counter is not stricly tracking tasks to | 4761 | * nr_active count is stable. |
5310 | * their home CPUs. So we just add the counter to another CPU's counter, | 4762 | * |
5311 | * to keep the global sum constant after CPU-down: | 4763 | * Also see the comment "Global load-average calculations". |
5312 | */ | ||
5313 | static void migrate_nr_uninterruptible(struct rq *rq_src) | ||
5314 | { | ||
5315 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); | ||
5316 | |||
5317 | rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; | ||
5318 | rq_src->nr_uninterruptible = 0; | ||
5319 | } | ||
5320 | |||
5321 | /* | ||
5322 | * remove the tasks which were accounted by rq from calc_load_tasks. | ||
5323 | */ | 4764 | */ |
5324 | static void calc_global_load_remove(struct rq *rq) | 4765 | static void calc_load_migrate(struct rq *rq) |
5325 | { | 4766 | { |
5326 | atomic_long_sub(rq->calc_load_active, &calc_load_tasks); | 4767 | long delta = calc_load_fold_active(rq); |
5327 | rq->calc_load_active = 0; | 4768 | if (delta) |
4769 | atomic_long_add(delta, &calc_load_tasks); | ||
5328 | } | 4770 | } |
5329 | 4771 | ||
5330 | /* | 4772 | /* |
@@ -5352,9 +4794,6 @@ static void migrate_tasks(unsigned int dead_cpu) | |||
5352 | */ | 4794 | */ |
5353 | rq->stop = NULL; | 4795 | rq->stop = NULL; |
5354 | 4796 | ||
5355 | /* Ensure any throttled groups are reachable by pick_next_task */ | ||
5356 | unthrottle_offline_cfs_rqs(rq); | ||
5357 | |||
5358 | for ( ; ; ) { | 4797 | for ( ; ; ) { |
5359 | /* | 4798 | /* |
5360 | * There's this thread running, bail when that's the only | 4799 | * There's this thread running, bail when that's the only |
@@ -5429,16 +4868,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) | |||
5429 | *tablep = NULL; | 4868 | *tablep = NULL; |
5430 | } | 4869 | } |
5431 | 4870 | ||
4871 | static int min_load_idx = 0; | ||
4872 | static int max_load_idx = CPU_LOAD_IDX_MAX; | ||
4873 | |||
5432 | static void | 4874 | static void |
5433 | set_table_entry(struct ctl_table *entry, | 4875 | set_table_entry(struct ctl_table *entry, |
5434 | const char *procname, void *data, int maxlen, | 4876 | const char *procname, void *data, int maxlen, |
5435 | umode_t mode, proc_handler *proc_handler) | 4877 | umode_t mode, proc_handler *proc_handler, |
4878 | bool load_idx) | ||
5436 | { | 4879 | { |
5437 | entry->procname = procname; | 4880 | entry->procname = procname; |
5438 | entry->data = data; | 4881 | entry->data = data; |
5439 | entry->maxlen = maxlen; | 4882 | entry->maxlen = maxlen; |
5440 | entry->mode = mode; | 4883 | entry->mode = mode; |
5441 | entry->proc_handler = proc_handler; | 4884 | entry->proc_handler = proc_handler; |
4885 | |||
4886 | if (load_idx) { | ||
4887 | entry->extra1 = &min_load_idx; | ||
4888 | entry->extra2 = &max_load_idx; | ||
4889 | } | ||
5442 | } | 4890 | } |
5443 | 4891 | ||
5444 | static struct ctl_table * | 4892 | static struct ctl_table * |
@@ -5450,30 +4898,30 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) | |||
5450 | return NULL; | 4898 | return NULL; |
5451 | 4899 | ||
5452 | set_table_entry(&table[0], "min_interval", &sd->min_interval, | 4900 | set_table_entry(&table[0], "min_interval", &sd->min_interval, |
5453 | sizeof(long), 0644, proc_doulongvec_minmax); | 4901 | sizeof(long), 0644, proc_doulongvec_minmax, false); |
5454 | set_table_entry(&table[1], "max_interval", &sd->max_interval, | 4902 | set_table_entry(&table[1], "max_interval", &sd->max_interval, |
5455 | sizeof(long), 0644, proc_doulongvec_minmax); | 4903 | sizeof(long), 0644, proc_doulongvec_minmax, false); |
5456 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, | 4904 | set_table_entry(&table[2], "busy_idx", &sd->busy_idx, |
5457 | sizeof(int), 0644, proc_dointvec_minmax); | 4905 | sizeof(int), 0644, proc_dointvec_minmax, true); |
5458 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, | 4906 | set_table_entry(&table[3], "idle_idx", &sd->idle_idx, |
5459 | sizeof(int), 0644, proc_dointvec_minmax); | 4907 | sizeof(int), 0644, proc_dointvec_minmax, true); |
5460 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, | 4908 | set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, |
5461 | sizeof(int), 0644, proc_dointvec_minmax); | 4909 | sizeof(int), 0644, proc_dointvec_minmax, true); |
5462 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, | 4910 | set_table_entry(&table[5], "wake_idx", &sd->wake_idx, |
5463 | sizeof(int), 0644, proc_dointvec_minmax); | 4911 | sizeof(int), 0644, proc_dointvec_minmax, true); |
5464 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, | 4912 | set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, |
5465 | sizeof(int), 0644, proc_dointvec_minmax); | 4913 | sizeof(int), 0644, proc_dointvec_minmax, true); |
5466 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, | 4914 | set_table_entry(&table[7], "busy_factor", &sd->busy_factor, |
5467 | sizeof(int), 0644, proc_dointvec_minmax); | 4915 | sizeof(int), 0644, proc_dointvec_minmax, false); |
5468 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, | 4916 | set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, |
5469 | sizeof(int), 0644, proc_dointvec_minmax); | 4917 | sizeof(int), 0644, proc_dointvec_minmax, false); |
5470 | set_table_entry(&table[9], "cache_nice_tries", | 4918 | set_table_entry(&table[9], "cache_nice_tries", |
5471 | &sd->cache_nice_tries, | 4919 | &sd->cache_nice_tries, |
5472 | sizeof(int), 0644, proc_dointvec_minmax); | 4920 | sizeof(int), 0644, proc_dointvec_minmax, false); |
5473 | set_table_entry(&table[10], "flags", &sd->flags, | 4921 | set_table_entry(&table[10], "flags", &sd->flags, |
5474 | sizeof(int), 0644, proc_dointvec_minmax); | 4922 | sizeof(int), 0644, proc_dointvec_minmax, false); |
5475 | set_table_entry(&table[11], "name", sd->name, | 4923 | set_table_entry(&table[11], "name", sd->name, |
5476 | CORENAME_MAX_SIZE, 0444, proc_dostring); | 4924 | CORENAME_MAX_SIZE, 0444, proc_dostring, false); |
5477 | /* &table[12] is terminator */ | 4925 | /* &table[12] is terminator */ |
5478 | 4926 | ||
5479 | return table; | 4927 | return table; |
@@ -5617,9 +5065,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
5617 | migrate_tasks(cpu); | 5065 | migrate_tasks(cpu); |
5618 | BUG_ON(rq->nr_running != 1); /* the migration thread */ | 5066 | BUG_ON(rq->nr_running != 1); /* the migration thread */ |
5619 | raw_spin_unlock_irqrestore(&rq->lock, flags); | 5067 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
5068 | break; | ||
5620 | 5069 | ||
5621 | migrate_nr_uninterruptible(rq); | 5070 | case CPU_DEAD: |
5622 | calc_global_load_remove(rq); | 5071 | calc_load_migrate(rq); |
5623 | break; | 5072 | break; |
5624 | #endif | 5073 | #endif |
5625 | } | 5074 | } |
@@ -6028,11 +5477,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu) | |||
6028 | * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this | 5477 | * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this |
6029 | * allows us to avoid some pointer chasing select_idle_sibling(). | 5478 | * allows us to avoid some pointer chasing select_idle_sibling(). |
6030 | * | 5479 | * |
6031 | * Iterate domains and sched_groups downward, assigning CPUs to be | ||
6032 | * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing | ||
6033 | * due to random perturbation self canceling, ie sw buddies pull | ||
6034 | * their counterpart to their CPU's hw counterpart. | ||
6035 | * | ||
6036 | * Also keep a unique ID per domain (we use the first cpu number in | 5480 | * Also keep a unique ID per domain (we use the first cpu number in |
6037 | * the cpumask of the domain), this allows us to quickly tell if | 5481 | * the cpumask of the domain), this allows us to quickly tell if |
6038 | * two cpus are in the same cache domain, see cpus_share_cache(). | 5482 | * two cpus are in the same cache domain, see cpus_share_cache(). |
@@ -6046,40 +5490,8 @@ static void update_top_cache_domain(int cpu) | |||
6046 | int id = cpu; | 5490 | int id = cpu; |
6047 | 5491 | ||
6048 | sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); | 5492 | sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); |
6049 | if (sd) { | 5493 | if (sd) |
6050 | struct sched_domain *tmp = sd; | ||
6051 | struct sched_group *sg, *prev; | ||
6052 | bool right; | ||
6053 | |||
6054 | /* | ||
6055 | * Traverse to first CPU in group, and count hops | ||
6056 | * to cpu from there, switching direction on each | ||
6057 | * hop, never ever pointing the last CPU rightward. | ||
6058 | */ | ||
6059 | do { | ||
6060 | id = cpumask_first(sched_domain_span(tmp)); | ||
6061 | prev = sg = tmp->groups; | ||
6062 | right = 1; | ||
6063 | |||
6064 | while (cpumask_first(sched_group_cpus(sg)) != id) | ||
6065 | sg = sg->next; | ||
6066 | |||
6067 | while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) { | ||
6068 | prev = sg; | ||
6069 | sg = sg->next; | ||
6070 | right = !right; | ||
6071 | } | ||
6072 | |||
6073 | /* A CPU went down, never point back to domain start. */ | ||
6074 | if (right && cpumask_first(sched_group_cpus(sg->next)) == id) | ||
6075 | right = false; | ||
6076 | |||
6077 | sg = right ? sg->next : prev; | ||
6078 | tmp->idle_buddy = cpumask_first(sched_group_cpus(sg)); | ||
6079 | } while ((tmp = tmp->child)); | ||
6080 | |||
6081 | id = cpumask_first(sched_domain_span(sd)); | 5494 | id = cpumask_first(sched_domain_span(sd)); |
6082 | } | ||
6083 | 5495 | ||
6084 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); | 5496 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
6085 | per_cpu(sd_llc_id, cpu) = id; | 5497 | per_cpu(sd_llc_id, cpu) = id; |
@@ -6588,7 +6000,6 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu) | |||
6588 | | 0*SD_BALANCE_FORK | 6000 | | 0*SD_BALANCE_FORK |
6589 | | 0*SD_BALANCE_WAKE | 6001 | | 0*SD_BALANCE_WAKE |
6590 | | 0*SD_WAKE_AFFINE | 6002 | | 0*SD_WAKE_AFFINE |
6591 | | 0*SD_PREFER_LOCAL | ||
6592 | | 0*SD_SHARE_CPUPOWER | 6003 | | 0*SD_SHARE_CPUPOWER |
6593 | | 0*SD_SHARE_PKG_RESOURCES | 6004 | | 0*SD_SHARE_PKG_RESOURCES |
6594 | | 1*SD_SERIALIZE | 6005 | | 1*SD_SERIALIZE |
@@ -8386,6 +7797,8 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
8386 | * (balbir@in.ibm.com). | 7797 | * (balbir@in.ibm.com). |
8387 | */ | 7798 | */ |
8388 | 7799 | ||
7800 | struct cpuacct root_cpuacct; | ||
7801 | |||
8389 | /* create a new cpu accounting group */ | 7802 | /* create a new cpu accounting group */ |
8390 | static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp) | 7803 | static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp) |
8391 | { | 7804 | { |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c new file mode 100644 index 000000000000..81b763ba58a6 --- /dev/null +++ b/kernel/sched/cputime.c | |||
@@ -0,0 +1,530 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/tsacct_kern.h> | ||
4 | #include <linux/kernel_stat.h> | ||
5 | #include <linux/static_key.h> | ||
6 | #include "sched.h" | ||
7 | |||
8 | |||
9 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
10 | |||
11 | /* | ||
12 | * There are no locks covering percpu hardirq/softirq time. | ||
13 | * They are only modified in vtime_account, on the corresponding CPU | ||
14 | * with interrupts disabled. So, writes are safe. | ||
15 | * They are read and saved off onto struct rq in update_rq_clock(). | ||
16 | * This may result in another CPU reading this CPU's irq time and can | ||
17 | * race with irq/vtime_account on this CPU. We would either get the old | ||
18 | * or the new value, with a side effect of accounting a slice of irq time to | ||
19 | * the wrong task when an irq is in progress while we read rq->clock. That is a worthy | ||
20 | * compromise in place of having locks on each irq in account_system_time. | ||
21 | */ | ||
22 | DEFINE_PER_CPU(u64, cpu_hardirq_time); | ||
23 | DEFINE_PER_CPU(u64, cpu_softirq_time); | ||
24 | |||
25 | static DEFINE_PER_CPU(u64, irq_start_time); | ||
26 | static int sched_clock_irqtime; | ||
27 | |||
28 | void enable_sched_clock_irqtime(void) | ||
29 | { | ||
30 | sched_clock_irqtime = 1; | ||
31 | } | ||
32 | |||
33 | void disable_sched_clock_irqtime(void) | ||
34 | { | ||
35 | sched_clock_irqtime = 0; | ||
36 | } | ||
37 | |||
38 | #ifndef CONFIG_64BIT | ||
39 | DEFINE_PER_CPU(seqcount_t, irq_time_seq); | ||
40 | #endif /* CONFIG_64BIT */ | ||
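On 32-bit, the irq_time_seq seqcount above is what lets a remote reader obtain a consistent 64-bit snapshot of cpu_hardirq_time/cpu_softirq_time. The read side is not part of this file; assuming the irq_time_write_begin()/irq_time_write_end() helpers used below bump this seqcount (they live in kernel/sched/sched.h), a reader would look roughly like this hypothetical sketch:

	#include <linux/percpu.h>
	#include <linux/seqlock.h>

	/* Sketch only; the function name is made up for illustration. */
	static u64 example_irq_time_read(int cpu)
	{
		u64 total;
		unsigned int seq;

		do {
			seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
			total = per_cpu(cpu_hardirq_time, cpu) +
				per_cpu(cpu_softirq_time, cpu);
		} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

		return total;
	}

On 64-bit each update is a single 64-bit store, which is why the seqcount is only defined for !CONFIG_64BIT and readers there can simply sum the two per-cpu counters.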
41 | |||
42 | /* | ||
43 | * Called before incrementing preempt_count on {soft,}irq_enter | ||
44 | * and before decrementing preempt_count on {soft,}irq_exit. | ||
45 | */ | ||
46 | void vtime_account(struct task_struct *curr) | ||
47 | { | ||
48 | unsigned long flags; | ||
49 | s64 delta; | ||
50 | int cpu; | ||
51 | |||
52 | if (!sched_clock_irqtime) | ||
53 | return; | ||
54 | |||
55 | local_irq_save(flags); | ||
56 | |||
57 | cpu = smp_processor_id(); | ||
58 | delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); | ||
59 | __this_cpu_add(irq_start_time, delta); | ||
60 | |||
61 | irq_time_write_begin(); | ||
62 | /* | ||
63 | * We do not account for softirq time from ksoftirqd here. | ||
64 | * We want to continue accounting softirq time to the ksoftirqd thread | ||
65 | * in that case, so as not to confuse the scheduler with a special task | ||
66 | * that does not consume any time, but still wants to run. | ||
67 | */ | ||
68 | if (hardirq_count()) | ||
69 | __this_cpu_add(cpu_hardirq_time, delta); | ||
70 | else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) | ||
71 | __this_cpu_add(cpu_softirq_time, delta); | ||
72 | |||
73 | irq_time_write_end(); | ||
74 | local_irq_restore(flags); | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(vtime_account); | ||
77 | |||
78 | static int irqtime_account_hi_update(void) | ||
79 | { | ||
80 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
81 | unsigned long flags; | ||
82 | u64 latest_ns; | ||
83 | int ret = 0; | ||
84 | |||
85 | local_irq_save(flags); | ||
86 | latest_ns = this_cpu_read(cpu_hardirq_time); | ||
87 | if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ]) | ||
88 | ret = 1; | ||
89 | local_irq_restore(flags); | ||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | static int irqtime_account_si_update(void) | ||
94 | { | ||
95 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
96 | unsigned long flags; | ||
97 | u64 latest_ns; | ||
98 | int ret = 0; | ||
99 | |||
100 | local_irq_save(flags); | ||
101 | latest_ns = this_cpu_read(cpu_softirq_time); | ||
102 | if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ]) | ||
103 | ret = 1; | ||
104 | local_irq_restore(flags); | ||
105 | return ret; | ||
106 | } | ||
107 | |||
108 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
109 | |||
110 | #define sched_clock_irqtime (0) | ||
111 | |||
112 | #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */ | ||
113 | |||
114 | static inline void task_group_account_field(struct task_struct *p, int index, | ||
115 | u64 tmp) | ||
116 | { | ||
117 | #ifdef CONFIG_CGROUP_CPUACCT | ||
118 | struct kernel_cpustat *kcpustat; | ||
119 | struct cpuacct *ca; | ||
120 | #endif | ||
121 | /* | ||
122 | * Since all updates are sure to touch the root cgroup, we | ||
123 | * get ourselves ahead and touch it first. If the root cgroup | ||
124 | * is the only cgroup, then nothing else should be necessary. | ||
125 | * | ||
126 | */ | ||
127 | __get_cpu_var(kernel_cpustat).cpustat[index] += tmp; | ||
128 | |||
129 | #ifdef CONFIG_CGROUP_CPUACCT | ||
130 | if (unlikely(!cpuacct_subsys.active)) | ||
131 | return; | ||
132 | |||
133 | rcu_read_lock(); | ||
134 | ca = task_ca(p); | ||
135 | while (ca && (ca != &root_cpuacct)) { | ||
136 | kcpustat = this_cpu_ptr(ca->cpustat); | ||
137 | kcpustat->cpustat[index] += tmp; | ||
138 | ca = parent_ca(ca); | ||
139 | } | ||
140 | rcu_read_unlock(); | ||
141 | #endif | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Account user cpu time to a process. | ||
146 | * @p: the process that the cpu time gets accounted to | ||
147 | * @cputime: the cpu time spent in user space since the last update | ||
148 | * @cputime_scaled: cputime scaled by cpu frequency | ||
149 | */ | ||
150 | void account_user_time(struct task_struct *p, cputime_t cputime, | ||
151 | cputime_t cputime_scaled) | ||
152 | { | ||
153 | int index; | ||
154 | |||
155 | /* Add user time to process. */ | ||
156 | p->utime += cputime; | ||
157 | p->utimescaled += cputime_scaled; | ||
158 | account_group_user_time(p, cputime); | ||
159 | |||
160 | index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; | ||
161 | |||
162 | /* Add user time to cpustat. */ | ||
163 | task_group_account_field(p, index, (__force u64) cputime); | ||
164 | |||
165 | /* Account for user time used */ | ||
166 | acct_update_integrals(p); | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Account guest cpu time to a process. | ||
171 | * @p: the process that the cpu time gets accounted to | ||
172 | * @cputime: the cpu time spent in virtual machine since the last update | ||
173 | * @cputime_scaled: cputime scaled by cpu frequency | ||
174 | */ | ||
175 | static void account_guest_time(struct task_struct *p, cputime_t cputime, | ||
176 | cputime_t cputime_scaled) | ||
177 | { | ||
178 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
179 | |||
180 | /* Add guest time to process. */ | ||
181 | p->utime += cputime; | ||
182 | p->utimescaled += cputime_scaled; | ||
183 | account_group_user_time(p, cputime); | ||
184 | p->gtime += cputime; | ||
185 | |||
186 | /* Add guest time to cpustat. */ | ||
187 | if (TASK_NICE(p) > 0) { | ||
188 | cpustat[CPUTIME_NICE] += (__force u64) cputime; | ||
189 | cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; | ||
190 | } else { | ||
191 | cpustat[CPUTIME_USER] += (__force u64) cputime; | ||
192 | cpustat[CPUTIME_GUEST] += (__force u64) cputime; | ||
193 | } | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Account system cpu time to a process and desired cpustat field | ||
198 | * @p: the process that the cpu time gets accounted to | ||
199 | * @cputime: the cpu time spent in kernel space since the last update | ||
200 | * @cputime_scaled: cputime scaled by cpu frequency | ||
201 | * @index: index of the cpustat field that has to be updated | ||
202 | */ | ||
203 | static inline | ||
204 | void __account_system_time(struct task_struct *p, cputime_t cputime, | ||
205 | cputime_t cputime_scaled, int index) | ||
206 | { | ||
207 | /* Add system time to process. */ | ||
208 | p->stime += cputime; | ||
209 | p->stimescaled += cputime_scaled; | ||
210 | account_group_system_time(p, cputime); | ||
211 | |||
212 | /* Add system time to cpustat. */ | ||
213 | task_group_account_field(p, index, (__force u64) cputime); | ||
214 | |||
215 | /* Account for system time used */ | ||
216 | acct_update_integrals(p); | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | * Account system cpu time to a process. | ||
221 | * @p: the process that the cpu time gets accounted to | ||
222 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
223 | * @cputime: the cpu time spent in kernel space since the last update | ||
224 | * @cputime_scaled: cputime scaled by cpu frequency | ||
225 | */ | ||
226 | void account_system_time(struct task_struct *p, int hardirq_offset, | ||
227 | cputime_t cputime, cputime_t cputime_scaled) | ||
228 | { | ||
229 | int index; | ||
230 | |||
231 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | ||
232 | account_guest_time(p, cputime, cputime_scaled); | ||
233 | return; | ||
234 | } | ||
235 | |||
236 | if (hardirq_count() - hardirq_offset) | ||
237 | index = CPUTIME_IRQ; | ||
238 | else if (in_serving_softirq()) | ||
239 | index = CPUTIME_SOFTIRQ; | ||
240 | else | ||
241 | index = CPUTIME_SYSTEM; | ||
242 | |||
243 | __account_system_time(p, cputime, cputime_scaled, index); | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * Account for involuntary wait time. | ||
248 | * @cputime: the cpu time spent in involuntary wait | ||
249 | */ | ||
250 | void account_steal_time(cputime_t cputime) | ||
251 | { | ||
252 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
253 | |||
254 | cpustat[CPUTIME_STEAL] += (__force u64) cputime; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * Account for idle time. | ||
259 | * @cputime: the cpu time spent in idle wait | ||
260 | */ | ||
261 | void account_idle_time(cputime_t cputime) | ||
262 | { | ||
263 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
264 | struct rq *rq = this_rq(); | ||
265 | |||
266 | if (atomic_read(&rq->nr_iowait) > 0) | ||
267 | cpustat[CPUTIME_IOWAIT] += (__force u64) cputime; | ||
268 | else | ||
269 | cpustat[CPUTIME_IDLE] += (__force u64) cputime; | ||
270 | } | ||
271 | |||
272 | static __always_inline bool steal_account_process_tick(void) | ||
273 | { | ||
274 | #ifdef CONFIG_PARAVIRT | ||
275 | if (static_key_false(¶virt_steal_enabled)) { | ||
276 | u64 steal, st = 0; | ||
277 | |||
278 | steal = paravirt_steal_clock(smp_processor_id()); | ||
279 | steal -= this_rq()->prev_steal_time; | ||
280 | |||
281 | st = steal_ticks(steal); | ||
282 | this_rq()->prev_steal_time += st * TICK_NSEC; | ||
283 | |||
284 | account_steal_time(st); | ||
285 | return st; | ||
286 | } | ||
287 | #endif | ||
288 | return false; | ||
289 | } | ||
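As a rough numeric illustration of the bookkeeping above (figures invented for the example): if paravirt_steal_clock() has advanced by two and a half ticks' worth of nanoseconds since prev_steal_time, steal_ticks() reports 2, prev_steal_time is advanced by exactly 2 * TICK_NSEC, and the remaining half tick stays in the delta so it can be folded into a later tick rather than being lost to rounding.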
290 | |||
291 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | ||
292 | |||
293 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
294 | /* | ||
295 | * Account a tick to a process and cpustat | ||
296 | * @p: the process that the cpu time gets accounted to | ||
297 | * @user_tick: is the tick from userspace | ||
298 | * @rq: the pointer to rq | ||
299 | * | ||
300 | * Tick demultiplexing follows the order | ||
301 | * - pending hardirq update | ||
302 | * - pending softirq update | ||
303 | * - user_time | ||
304 | * - idle_time | ||
305 | * - system time | ||
306 | * - check for guest_time | ||
307 | * - else account as system_time | ||
308 | * | ||
309 | * The check for hardirq is done for both system and user time, as there is | ||
310 | * no timer going off while we are in hardirq and hence we may never get an | ||
311 | * opportunity to update it solely in system time. | ||
312 | * p->stime and friends are only updated on system time and not on irq/ | ||
313 | * softirq, as those do not count in task exec_runtime any more. | ||
314 | */ | ||
315 | static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | ||
316 | struct rq *rq) | ||
317 | { | ||
318 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); | ||
319 | u64 *cpustat = kcpustat_this_cpu->cpustat; | ||
320 | |||
321 | if (steal_account_process_tick()) | ||
322 | return; | ||
323 | |||
324 | if (irqtime_account_hi_update()) { | ||
325 | cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; | ||
326 | } else if (irqtime_account_si_update()) { | ||
327 | cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; | ||
328 | } else if (this_cpu_ksoftirqd() == p) { | ||
329 | /* | ||
330 | * ksoftirqd time does not get accounted in cpu_softirq_time. | ||
331 | * So, we have to handle it separately here. | ||
332 | * Also, p->stime needs to be updated for ksoftirqd. | ||
333 | */ | ||
334 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, | ||
335 | CPUTIME_SOFTIRQ); | ||
336 | } else if (user_tick) { | ||
337 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); | ||
338 | } else if (p == rq->idle) { | ||
339 | account_idle_time(cputime_one_jiffy); | ||
340 | } else if (p->flags & PF_VCPU) { /* System time or guest time */ | ||
341 | account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); | ||
342 | } else { | ||
343 | __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, | ||
344 | CPUTIME_SYSTEM); | ||
345 | } | ||
346 | } | ||
347 | |||
348 | static void irqtime_account_idle_ticks(int ticks) | ||
349 | { | ||
350 | int i; | ||
351 | struct rq *rq = this_rq(); | ||
352 | |||
353 | for (i = 0; i < ticks; i++) | ||
354 | irqtime_account_process_tick(current, 0, rq); | ||
355 | } | ||
356 | #else /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
357 | static void irqtime_account_idle_ticks(int ticks) {} | ||
358 | static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | ||
359 | struct rq *rq) {} | ||
360 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
361 | |||
362 | /* | ||
363 | * Account a single tick of cpu time. | ||
364 | * @p: the process that the cpu time gets accounted to | ||
365 | * @user_tick: indicates if the tick is a user or a system tick | ||
366 | */ | ||
367 | void account_process_tick(struct task_struct *p, int user_tick) | ||
368 | { | ||
369 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); | ||
370 | struct rq *rq = this_rq(); | ||
371 | |||
372 | if (sched_clock_irqtime) { | ||
373 | irqtime_account_process_tick(p, user_tick, rq); | ||
374 | return; | ||
375 | } | ||
376 | |||
377 | if (steal_account_process_tick()) | ||
378 | return; | ||
379 | |||
380 | if (user_tick) | ||
381 | account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); | ||
382 | else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) | ||
383 | account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, | ||
384 | one_jiffy_scaled); | ||
385 | else | ||
386 | account_idle_time(cputime_one_jiffy); | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Account multiple ticks of steal time. | ||
391 | * @p: the process from which the cpu time has been stolen | ||
392 | * @ticks: number of stolen ticks | ||
393 | */ | ||
394 | void account_steal_ticks(unsigned long ticks) | ||
395 | { | ||
396 | account_steal_time(jiffies_to_cputime(ticks)); | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * Account multiple ticks of idle time. | ||
401 | * @ticks: number of idle ticks | ||
402 | */ | ||
403 | void account_idle_ticks(unsigned long ticks) | ||
404 | { | ||
405 | |||
406 | if (sched_clock_irqtime) { | ||
407 | irqtime_account_idle_ticks(ticks); | ||
408 | return; | ||
409 | } | ||
410 | |||
411 | account_idle_time(jiffies_to_cputime(ticks)); | ||
412 | } | ||
413 | |||
414 | #endif | ||
415 | |||
416 | /* | ||
417 | * Use precise platform statistics if available: | ||
418 | */ | ||
419 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
420 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
421 | { | ||
422 | *ut = p->utime; | ||
423 | *st = p->stime; | ||
424 | } | ||
425 | |||
426 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
427 | { | ||
428 | struct task_cputime cputime; | ||
429 | |||
430 | thread_group_cputime(p, &cputime); | ||
431 | |||
432 | *ut = cputime.utime; | ||
433 | *st = cputime.stime; | ||
434 | } | ||
435 | |||
436 | /* | ||
437 | * Archs that account the whole time spent in the idle task | ||
438 | * (outside irq) as idle time can rely on this and just implement | ||
439 | * vtime_account_system() and vtime_account_idle(). Archs that | ||
440 | * have a different meaning of idle time (s390 only includes the | ||
441 | * time spent by the CPU when it's in low power mode) must override | ||
442 | * vtime_account(). | ||
443 | */ | ||
444 | #ifndef __ARCH_HAS_VTIME_ACCOUNT | ||
445 | void vtime_account(struct task_struct *tsk) | ||
446 | { | ||
447 | unsigned long flags; | ||
448 | |||
449 | local_irq_save(flags); | ||
450 | |||
451 | if (in_interrupt() || !is_idle_task(tsk)) | ||
452 | vtime_account_system(tsk); | ||
453 | else | ||
454 | vtime_account_idle(tsk); | ||
455 | |||
456 | local_irq_restore(flags); | ||
457 | } | ||
458 | EXPORT_SYMBOL_GPL(vtime_account); | ||
459 | #endif /* __ARCH_HAS_VTIME_ACCOUNT */ | ||
460 | |||
461 | #else | ||
462 | |||
463 | #ifndef nsecs_to_cputime | ||
464 | # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) | ||
465 | #endif | ||
466 | |||
467 | static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) | ||
468 | { | ||
469 | u64 temp = (__force u64) rtime; | ||
470 | |||
471 | temp *= (__force u64) utime; | ||
472 | |||
473 | if (sizeof(cputime_t) == 4) | ||
474 | temp = div_u64(temp, (__force u32) total); | ||
475 | else | ||
476 | temp = div64_u64(temp, (__force u64) total); | ||
477 | |||
478 | return (__force cputime_t) temp; | ||
479 | } | ||
480 | |||
481 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
482 | { | ||
483 | cputime_t rtime, utime = p->utime, total = utime + p->stime; | ||
484 | |||
485 | /* | ||
486 | * Use CFS's precise accounting: | ||
487 | */ | ||
488 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); | ||
489 | |||
490 | if (total) | ||
491 | utime = scale_utime(utime, rtime, total); | ||
492 | else | ||
493 | utime = rtime; | ||
494 | |||
495 | /* | ||
496 | * Compare with previous values, to keep monotonicity: | ||
497 | */ | ||
498 | p->prev_utime = max(p->prev_utime, utime); | ||
499 | p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); | ||
500 | |||
501 | *ut = p->prev_utime; | ||
502 | *st = p->prev_stime; | ||
503 | } | ||
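To make the scaling above concrete (numbers invented for the example): with tick-sampled utime = 30 and stime = 10 (total = 40) but a sum_exec_runtime that converts to rtime = 48, scale_utime() returns 48 * 30 / 40 = 36, so the precise CFS runtime is split in the same 3:1 ratio the tick sampling observed; the max() against prev_utime and prev_stime then only ensures that neither reported value ever goes backwards between successive reads.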
504 | |||
505 | /* | ||
506 | * Must be called with siglock held. | ||
507 | */ | ||
508 | void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | ||
509 | { | ||
510 | struct signal_struct *sig = p->signal; | ||
511 | struct task_cputime cputime; | ||
512 | cputime_t rtime, utime, total; | ||
513 | |||
514 | thread_group_cputime(p, &cputime); | ||
515 | |||
516 | total = cputime.utime + cputime.stime; | ||
517 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); | ||
518 | |||
519 | if (total) | ||
520 | utime = scale_utime(cputime.utime, rtime, total); | ||
521 | else | ||
522 | utime = rtime; | ||
523 | |||
524 | sig->prev_utime = max(sig->prev_utime, utime); | ||
525 | sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); | ||
526 | |||
527 | *ut = sig->prev_utime; | ||
528 | *st = sig->prev_stime; | ||
529 | } | ||
530 | #endif | ||
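A hypothetical caller honouring the siglock rule above, modelled loosely on how sys_times()-style code reads the group totals (the wrapper name is invented for illustration):

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static void example_read_group_times(cputime_t *ut, cputime_t *st)
	{
		/* siglock keeps the thread group's accounting stable while we sum it */
		spin_lock_irq(&current->sighand->siglock);
		thread_group_times(current, ut, st);
		spin_unlock_irq(&current->sighand->siglock);
	}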
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c219bf8d704c..6b800a14b990 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -597,7 +597,7 @@ calc_delta_fair(unsigned long delta, struct sched_entity *se) | |||
597 | /* | 597 | /* |
598 | * The idea is to set a period in which each task runs once. | 598 | * The idea is to set a period in which each task runs once. |
599 | * | 599 | * |
600 | * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch | 600 | * When there are too many tasks (sched_nr_latency) we have to stretch |
601 | * this period because otherwise the slices get too small. | 601 | * this period because otherwise the slices get too small. |
602 | * | 602 | * |
603 | * p = (nr <= nl) ? l : l*nr/nl | 603 | * p = (nr <= nl) ? l : l*nr/nl |
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | |||
2052 | hrtimer_cancel(&cfs_b->slack_timer); | 2052 | hrtimer_cancel(&cfs_b->slack_timer); |
2053 | } | 2053 | } |
2054 | 2054 | ||
2055 | void unthrottle_offline_cfs_rqs(struct rq *rq) | 2055 | static void unthrottle_offline_cfs_rqs(struct rq *rq) |
2056 | { | 2056 | { |
2057 | struct cfs_rq *cfs_rq; | 2057 | struct cfs_rq *cfs_rq; |
2058 | 2058 | ||
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) | |||
2106 | return NULL; | 2106 | return NULL; |
2107 | } | 2107 | } |
2108 | static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} | 2108 | static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} |
2109 | void unthrottle_offline_cfs_rqs(struct rq *rq) {} | 2109 | static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} |
2110 | 2110 | ||
2111 | #endif /* CONFIG_CFS_BANDWIDTH */ | 2111 | #endif /* CONFIG_CFS_BANDWIDTH */ |
2112 | 2112 | ||
@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target) | |||
2637 | int cpu = smp_processor_id(); | 2637 | int cpu = smp_processor_id(); |
2638 | int prev_cpu = task_cpu(p); | 2638 | int prev_cpu = task_cpu(p); |
2639 | struct sched_domain *sd; | 2639 | struct sched_domain *sd; |
2640 | struct sched_group *sg; | ||
2641 | int i; | ||
2640 | 2642 | ||
2641 | /* | 2643 | /* |
2642 | * If the task is going to be woken-up on this cpu and if it is | 2644 | * If the task is going to be woken-up on this cpu and if it is |
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target) | |||
2653 | return prev_cpu; | 2655 | return prev_cpu; |
2654 | 2656 | ||
2655 | /* | 2657 | /* |
2656 | * Otherwise, check assigned siblings to find an eligible idle cpu. | 2658 | * Otherwise, iterate the domains and find an eligible idle cpu. |
2657 | */ | 2659 | */ |
2658 | sd = rcu_dereference(per_cpu(sd_llc, target)); | 2660 | sd = rcu_dereference(per_cpu(sd_llc, target)); |
2659 | |||
2660 | for_each_lower_domain(sd) { | 2661 | for_each_lower_domain(sd) { |
2661 | if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p))) | 2662 | sg = sd->groups; |
2662 | continue; | 2663 | do { |
2663 | if (idle_cpu(sd->idle_buddy)) | 2664 | if (!cpumask_intersects(sched_group_cpus(sg), |
2664 | return sd->idle_buddy; | 2665 | tsk_cpus_allowed(p))) |
2665 | } | 2666 | goto next; |
2666 | 2667 | ||
2668 | for_each_cpu(i, sched_group_cpus(sg)) { | ||
2669 | if (!idle_cpu(i)) | ||
2670 | goto next; | ||
2671 | } | ||
2672 | |||
2673 | target = cpumask_first_and(sched_group_cpus(sg), | ||
2674 | tsk_cpus_allowed(p)); | ||
2675 | goto done; | ||
2676 | next: | ||
2677 | sg = sg->next; | ||
2678 | } while (sg != sd->groups); | ||
2679 | } | ||
2680 | done: | ||
2667 | return target; | 2681 | return target; |
2668 | } | 2682 | } |
2669 | 2683 | ||
@@ -2686,7 +2700,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | |||
2686 | int prev_cpu = task_cpu(p); | 2700 | int prev_cpu = task_cpu(p); |
2687 | int new_cpu = cpu; | 2701 | int new_cpu = cpu; |
2688 | int want_affine = 0; | 2702 | int want_affine = 0; |
2689 | int want_sd = 1; | ||
2690 | int sync = wake_flags & WF_SYNC; | 2703 | int sync = wake_flags & WF_SYNC; |
2691 | 2704 | ||
2692 | if (p->nr_cpus_allowed == 1) | 2705 | if (p->nr_cpus_allowed == 1) |
@@ -2704,48 +2717,21 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) | |||
2704 | continue; | 2717 | continue; |
2705 | 2718 | ||
2706 | /* | 2719 | /* |
2707 | * If power savings logic is enabled for a domain, see if we | ||
2708 | * are not overloaded, if so, don't balance wider. | ||
2709 | */ | ||
2710 | if (tmp->flags & (SD_PREFER_LOCAL)) { | ||
2711 | unsigned long power = 0; | ||
2712 | unsigned long nr_running = 0; | ||
2713 | unsigned long capacity; | ||
2714 | int i; | ||
2715 | |||
2716 | for_each_cpu(i, sched_domain_span(tmp)) { | ||
2717 | power += power_of(i); | ||
2718 | nr_running += cpu_rq(i)->cfs.nr_running; | ||
2719 | } | ||
2720 | |||
2721 | capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE); | ||
2722 | |||
2723 | if (nr_running < capacity) | ||
2724 | want_sd = 0; | ||
2725 | } | ||
2726 | |||
2727 | /* | ||
2728 | * If both cpu and prev_cpu are part of this domain, | 2720 | * If both cpu and prev_cpu are part of this domain, |
2729 | * cpu is a valid SD_WAKE_AFFINE target. | 2721 | * cpu is a valid SD_WAKE_AFFINE target. |
2730 | */ | 2722 | */ |
2731 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | 2723 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && |
2732 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | 2724 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { |
2733 | affine_sd = tmp; | 2725 | affine_sd = tmp; |
2734 | want_affine = 0; | ||
2735 | } | ||
2736 | |||
2737 | if (!want_sd && !want_affine) | ||
2738 | break; | 2726 | break; |
2727 | } | ||
2739 | 2728 | ||
2740 | if (!(tmp->flags & sd_flag)) | 2729 | if (tmp->flags & sd_flag) |
2741 | continue; | ||
2742 | |||
2743 | if (want_sd) | ||
2744 | sd = tmp; | 2730 | sd = tmp; |
2745 | } | 2731 | } |
2746 | 2732 | ||
2747 | if (affine_sd) { | 2733 | if (affine_sd) { |
2748 | if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) | 2734 | if (cpu != prev_cpu && wake_affine(affine_sd, p, sync)) |
2749 | prev_cpu = cpu; | 2735 | prev_cpu = cpu; |
2750 | 2736 | ||
2751 | new_cpu = select_idle_sibling(p, prev_cpu); | 2737 | new_cpu = select_idle_sibling(p, prev_cpu); |
@@ -3658,7 +3644,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) | |||
3658 | * @group: sched_group whose statistics are to be updated. | 3644 | * @group: sched_group whose statistics are to be updated. |
3659 | * @load_idx: Load index of sched_domain of this_cpu for load calc. | 3645 | * @load_idx: Load index of sched_domain of this_cpu for load calc. |
3660 | * @local_group: Does group contain this_cpu. | 3646 | * @local_group: Does group contain this_cpu. |
3661 | * @cpus: Set of cpus considered for load balancing. | ||
3662 | * @balance: Should we balance. | 3647 | * @balance: Should we balance. |
3663 | * @sgs: variable to hold the statistics for this group. | 3648 | * @sgs: variable to hold the statistics for this group. |
3664 | */ | 3649 | */ |
@@ -3805,7 +3790,6 @@ static bool update_sd_pick_busiest(struct lb_env *env, | |||
3805 | /** | 3790 | /** |
3806 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. | 3791 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. |
3807 | * @env: The load balancing environment. | 3792 | * @env: The load balancing environment. |
3808 | * @cpus: Set of cpus considered for load balancing. | ||
3809 | * @balance: Should we balance. | 3793 | * @balance: Should we balance. |
3810 | * @sds: variable to hold the statistics for this sched_domain. | 3794 | * @sds: variable to hold the statistics for this sched_domain. |
3811 | */ | 3795 | */ |
@@ -4283,7 +4267,7 @@ redo: | |||
4283 | goto out_balanced; | 4267 | goto out_balanced; |
4284 | } | 4268 | } |
4285 | 4269 | ||
4286 | BUG_ON(busiest == this_rq); | 4270 | BUG_ON(busiest == env.dst_rq); |
4287 | 4271 | ||
4288 | schedstat_add(sd, lb_imbalance[idle], env.imbalance); | 4272 | schedstat_add(sd, lb_imbalance[idle], env.imbalance); |
4289 | 4273 | ||
@@ -4304,7 +4288,7 @@ redo: | |||
4304 | update_h_load(env.src_cpu); | 4288 | update_h_load(env.src_cpu); |
4305 | more_balance: | 4289 | more_balance: |
4306 | local_irq_save(flags); | 4290 | local_irq_save(flags); |
4307 | double_rq_lock(this_rq, busiest); | 4291 | double_rq_lock(env.dst_rq, busiest); |
4308 | 4292 | ||
4309 | /* | 4293 | /* |
4310 | * cur_ld_moved - load moved in current iteration | 4294 | * cur_ld_moved - load moved in current iteration |
@@ -4312,7 +4296,7 @@ more_balance: | |||
4312 | */ | 4296 | */ |
4313 | cur_ld_moved = move_tasks(&env); | 4297 | cur_ld_moved = move_tasks(&env); |
4314 | ld_moved += cur_ld_moved; | 4298 | ld_moved += cur_ld_moved; |
4315 | double_rq_unlock(this_rq, busiest); | 4299 | double_rq_unlock(env.dst_rq, busiest); |
4316 | local_irq_restore(flags); | 4300 | local_irq_restore(flags); |
4317 | 4301 | ||
4318 | if (env.flags & LBF_NEED_BREAK) { | 4302 | if (env.flags & LBF_NEED_BREAK) { |
@@ -4348,8 +4332,7 @@ more_balance: | |||
4348 | if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 && | 4332 | if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 && |
4349 | lb_iterations++ < max_lb_iterations) { | 4333 | lb_iterations++ < max_lb_iterations) { |
4350 | 4334 | ||
4351 | this_rq = cpu_rq(env.new_dst_cpu); | 4335 | env.dst_rq = cpu_rq(env.new_dst_cpu); |
4352 | env.dst_rq = this_rq; | ||
4353 | env.dst_cpu = env.new_dst_cpu; | 4336 | env.dst_cpu = env.new_dst_cpu; |
4354 | env.flags &= ~LBF_SOME_PINNED; | 4337 | env.flags &= ~LBF_SOME_PINNED; |
4355 | env.loop = 0; | 4338 | env.loop = 0; |
@@ -4634,7 +4617,7 @@ static void nohz_balancer_kick(int cpu) | |||
4634 | return; | 4617 | return; |
4635 | } | 4618 | } |
4636 | 4619 | ||
4637 | static inline void clear_nohz_tick_stopped(int cpu) | 4620 | static inline void nohz_balance_exit_idle(int cpu) |
4638 | { | 4621 | { |
4639 | if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { | 4622 | if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { |
4640 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); | 4623 | cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); |
@@ -4674,28 +4657,23 @@ void set_cpu_sd_state_idle(void) | |||
4674 | } | 4657 | } |
4675 | 4658 | ||
4676 | /* | 4659 | /* |
4677 | * This routine will record that this cpu is going idle with tick stopped. | 4660 | * This routine will record that the cpu is going idle with tick stopped. |
4678 | * This info will be used in performing idle load balancing in the future. | 4661 | * This info will be used in performing idle load balancing in the future. |
4679 | */ | 4662 | */ |
4680 | void select_nohz_load_balancer(int stop_tick) | 4663 | void nohz_balance_enter_idle(int cpu) |
4681 | { | 4664 | { |
4682 | int cpu = smp_processor_id(); | ||
4683 | |||
4684 | /* | 4665 | /* |
4685 | * If this cpu is going down, then nothing needs to be done. | 4666 | * If this cpu is going down, then nothing needs to be done. |
4686 | */ | 4667 | */ |
4687 | if (!cpu_active(cpu)) | 4668 | if (!cpu_active(cpu)) |
4688 | return; | 4669 | return; |
4689 | 4670 | ||
4690 | if (stop_tick) { | 4671 | if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) |
4691 | if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) | 4672 | return; |
4692 | return; | ||
4693 | 4673 | ||
4694 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); | 4674 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); |
4695 | atomic_inc(&nohz.nr_cpus); | 4675 | atomic_inc(&nohz.nr_cpus); |
4696 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); | 4676 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); |
4697 | } | ||
4698 | return; | ||
4699 | } | 4677 | } |
4700 | 4678 | ||
4701 | static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, | 4679 | static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, |
@@ -4703,7 +4681,7 @@ static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, | |||
4703 | { | 4681 | { |
4704 | switch (action & ~CPU_TASKS_FROZEN) { | 4682 | switch (action & ~CPU_TASKS_FROZEN) { |
4705 | case CPU_DYING: | 4683 | case CPU_DYING: |
4706 | clear_nohz_tick_stopped(smp_processor_id()); | 4684 | nohz_balance_exit_idle(smp_processor_id()); |
4707 | return NOTIFY_OK; | 4685 | return NOTIFY_OK; |
4708 | default: | 4686 | default: |
4709 | return NOTIFY_DONE; | 4687 | return NOTIFY_DONE; |
@@ -4825,14 +4803,15 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) | |||
4825 | if (need_resched()) | 4803 | if (need_resched()) |
4826 | break; | 4804 | break; |
4827 | 4805 | ||
4828 | raw_spin_lock_irq(&this_rq->lock); | 4806 | rq = cpu_rq(balance_cpu); |
4829 | update_rq_clock(this_rq); | 4807 | |
4830 | update_idle_cpu_load(this_rq); | 4808 | raw_spin_lock_irq(&rq->lock); |
4831 | raw_spin_unlock_irq(&this_rq->lock); | 4809 | update_rq_clock(rq); |
4810 | update_idle_cpu_load(rq); | ||
4811 | raw_spin_unlock_irq(&rq->lock); | ||
4832 | 4812 | ||
4833 | rebalance_domains(balance_cpu, CPU_IDLE); | 4813 | rebalance_domains(balance_cpu, CPU_IDLE); |
4834 | 4814 | ||
4835 | rq = cpu_rq(balance_cpu); | ||
4836 | if (time_after(this_rq->next_balance, rq->next_balance)) | 4815 | if (time_after(this_rq->next_balance, rq->next_balance)) |
4837 | this_rq->next_balance = rq->next_balance; | 4816 | this_rq->next_balance = rq->next_balance; |
4838 | } | 4817 | } |
@@ -4863,7 +4842,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu) | |||
4863 | * busy tick after returning from idle, we will update the busy stats. | 4842 | * busy tick after returning from idle, we will update the busy stats. |
4864 | */ | 4843 | */ |
4865 | set_cpu_sd_state_busy(); | 4844 | set_cpu_sd_state_busy(); |
4866 | clear_nohz_tick_stopped(cpu); | 4845 | nohz_balance_exit_idle(cpu); |
4867 | 4846 | ||
4868 | /* | 4847 | /* |
4869 | * None are in tickless mode and hence no need for NOHZ idle load | 4848 | * None are in tickless mode and hence no need for NOHZ idle load |
@@ -4956,6 +4935,9 @@ static void rq_online_fair(struct rq *rq) | |||
4956 | static void rq_offline_fair(struct rq *rq) | 4935 | static void rq_offline_fair(struct rq *rq) |
4957 | { | 4936 | { |
4958 | update_sysctl(); | 4937 | update_sysctl(); |
4938 | |||
4939 | /* Ensure any throttled groups are reachable by pick_next_task */ | ||
4940 | unthrottle_offline_cfs_rqs(rq); | ||
4959 | } | 4941 | } |
4960 | 4942 | ||
4961 | #endif /* CONFIG_SMP */ | 4943 | #endif /* CONFIG_SMP */ |
diff --git a/kernel/sched/features.h b/kernel/sched/features.h index de00a486c5c6..eebefcad7027 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h | |||
@@ -12,14 +12,6 @@ SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) | |||
12 | SCHED_FEAT(START_DEBIT, true) | 12 | SCHED_FEAT(START_DEBIT, true) |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * Based on load and program behaviour, see if it makes sense to place | ||
16 | * a newly woken task on the same cpu as the task that woke it -- | ||
17 | * improve cache locality. Typically used with SYNC wakeups as | ||
18 | * generated by pipes and the like, see also SYNC_WAKEUPS. | ||
19 | */ | ||
20 | SCHED_FEAT(AFFINE_WAKEUPS, true) | ||
21 | |||
22 | /* | ||
23 | * Prefer to schedule the task we woke last (assuming it failed | 15 | * Prefer to schedule the task we woke last (assuming it failed |
24 | * wakeup-preemption), since its likely going to consume data we | 16 | * wakeup-preemption), since its likely going to consume data we |
25 | * touched, increases cache locality. | 17 | * touched, increases cache locality. |
@@ -42,7 +34,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true) | |||
42 | /* | 34 | /* |
43 | * Use arch dependent cpu power functions | 35 | * Use arch dependent cpu power functions |
44 | */ | 36 | */ |
45 | SCHED_FEAT(ARCH_POWER, false) | 37 | SCHED_FEAT(ARCH_POWER, true) |
46 | 38 | ||
47 | SCHED_FEAT(HRTICK, false) | 39 | SCHED_FEAT(HRTICK, false) |
48 | SCHED_FEAT(DOUBLE_TICK, false) | 40 | SCHED_FEAT(DOUBLE_TICK, false) |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 944cb68420e9..418feb01344e 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -691,6 +691,7 @@ balanced: | |||
691 | * runtime - in which case borrowing doesn't make sense. | 691 | * runtime - in which case borrowing doesn't make sense. |
692 | */ | 692 | */ |
693 | rt_rq->rt_runtime = RUNTIME_INF; | 693 | rt_rq->rt_runtime = RUNTIME_INF; |
694 | rt_rq->rt_throttled = 0; | ||
694 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 695 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
695 | raw_spin_unlock(&rt_b->rt_runtime_lock); | 696 | raw_spin_unlock(&rt_b->rt_runtime_lock); |
696 | } | 697 | } |
@@ -1631,11 +1632,6 @@ static int push_rt_task(struct rq *rq) | |||
1631 | if (!next_task) | 1632 | if (!next_task) |
1632 | return 0; | 1633 | return 0; |
1633 | 1634 | ||
1634 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
1635 | if (unlikely(task_running(rq, next_task))) | ||
1636 | return 0; | ||
1637 | #endif | ||
1638 | |||
1639 | retry: | 1635 | retry: |
1640 | if (unlikely(next_task == rq->curr)) { | 1636 | if (unlikely(next_task == rq->curr)) { |
1641 | WARN_ON(1); | 1637 | WARN_ON(1); |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f6714d009e77..7a7db09cfabc 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) | |||
737 | */ | 737 | */ |
738 | next->on_cpu = 1; | 738 | next->on_cpu = 1; |
739 | #endif | 739 | #endif |
740 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
741 | raw_spin_unlock_irq(&rq->lock); | ||
742 | #else | ||
743 | raw_spin_unlock(&rq->lock); | 740 | raw_spin_unlock(&rq->lock); |
744 | #endif | ||
745 | } | 741 | } |
746 | 742 | ||
747 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | 743 | static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) |
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
755 | smp_wmb(); | 751 | smp_wmb(); |
756 | prev->on_cpu = 0; | 752 | prev->on_cpu = 0; |
757 | #endif | 753 | #endif |
758 | #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
759 | local_irq_enable(); | 754 | local_irq_enable(); |
760 | #endif | ||
761 | } | 755 | } |
762 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ | 756 | #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ |
763 | 757 | ||
@@ -891,6 +885,9 @@ struct cpuacct { | |||
891 | struct kernel_cpustat __percpu *cpustat; | 885 | struct kernel_cpustat __percpu *cpustat; |
892 | }; | 886 | }; |
893 | 887 | ||
888 | extern struct cgroup_subsys cpuacct_subsys; | ||
889 | extern struct cpuacct root_cpuacct; | ||
890 | |||
894 | /* return cpu accounting group corresponding to this container */ | 891 | /* return cpu accounting group corresponding to this container */ |
895 | static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) | 892 | static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) |
896 | { | 893 | { |
@@ -917,6 +914,16 @@ extern void cpuacct_charge(struct task_struct *tsk, u64 cputime); | |||
917 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} | 914 | static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} |
918 | #endif | 915 | #endif |
919 | 916 | ||
917 | #ifdef CONFIG_PARAVIRT | ||
918 | static inline u64 steal_ticks(u64 steal) | ||
919 | { | ||
920 | if (unlikely(steal > NSEC_PER_SEC)) | ||
921 | return div_u64(steal, TICK_NSEC); | ||
922 | |||
923 | return __iter_div_u64_rem(steal, TICK_NSEC, &steal); | ||
924 | } | ||
925 | #endif | ||
926 | |||
920 | static inline void inc_nr_running(struct rq *rq) | 927 | static inline void inc_nr_running(struct rq *rq) |
921 | { | 928 | { |
922 | rq->nr_running++; | 929 | rq->nr_running++; |
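
The steal_ticks() helper added in this hunk converts a paravirt steal-time delta, reported in nanoseconds, into whole scheduler ticks. A minimal usage sketch, assuming HZ=1000 so TICK_NSEC is 1,000,000 ns; the wrapper function and its name are illustrative only, not part of the patch:

	/* Hypothetical caller: turn a hypervisor steal-time delta into ticks. */
	static u64 example_steal_to_ticks(u64 steal_ns)
	{
		/*
		 * Deltas of up to a second take the cheap iterative divide
		 * (__iter_div_u64_rem); larger backlogs fall back to div_u64().
		 * With TICK_NSEC = 1000000, 2500000 ns gives 2 ticks.
		 */
		return steal_ticks(steal_ns);
	}
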
@@ -1144,7 +1151,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu); | |||
1144 | 1151 | ||
1145 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); | 1152 | extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
1146 | extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); | 1153 | extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); |
1147 | extern void unthrottle_offline_cfs_rqs(struct rq *rq); | ||
1148 | 1154 | ||
1149 | extern void account_cfs_bandwidth_used(int enabled, int was_enabled); | 1155 | extern void account_cfs_bandwidth_used(int enabled, int was_enabled); |
1150 | 1156 | ||
@@ -1157,3 +1163,53 @@ enum rq_nohz_flag_bits { | |||
1157 | 1163 | ||
1158 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) | 1164 | #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
1159 | #endif | 1165 | #endif |
1166 | |||
1167 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
1168 | |||
1169 | DECLARE_PER_CPU(u64, cpu_hardirq_time); | ||
1170 | DECLARE_PER_CPU(u64, cpu_softirq_time); | ||
1171 | |||
1172 | #ifndef CONFIG_64BIT | ||
1173 | DECLARE_PER_CPU(seqcount_t, irq_time_seq); | ||
1174 | |||
1175 | static inline void irq_time_write_begin(void) | ||
1176 | { | ||
1177 | __this_cpu_inc(irq_time_seq.sequence); | ||
1178 | smp_wmb(); | ||
1179 | } | ||
1180 | |||
1181 | static inline void irq_time_write_end(void) | ||
1182 | { | ||
1183 | smp_wmb(); | ||
1184 | __this_cpu_inc(irq_time_seq.sequence); | ||
1185 | } | ||
1186 | |||
1187 | static inline u64 irq_time_read(int cpu) | ||
1188 | { | ||
1189 | u64 irq_time; | ||
1190 | unsigned seq; | ||
1191 | |||
1192 | do { | ||
1193 | seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); | ||
1194 | irq_time = per_cpu(cpu_softirq_time, cpu) + | ||
1195 | per_cpu(cpu_hardirq_time, cpu); | ||
1196 | } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); | ||
1197 | |||
1198 | return irq_time; | ||
1199 | } | ||
1200 | #else /* CONFIG_64BIT */ | ||
1201 | static inline void irq_time_write_begin(void) | ||
1202 | { | ||
1203 | } | ||
1204 | |||
1205 | static inline void irq_time_write_end(void) | ||
1206 | { | ||
1207 | } | ||
1208 | |||
1209 | static inline u64 irq_time_read(int cpu) | ||
1210 | { | ||
1211 | return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); | ||
1212 | } | ||
1213 | #endif /* CONFIG_64BIT */ | ||
1214 | #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ | ||
1215 | |||
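
On 32-bit kernels a plain load of the two per-cpu u64 counters could tear, so readers go through the hand-rolled seqcount above. The matching writer side lives elsewhere in the scheduler code, not in this hunk; the sketch below only illustrates how irq_time_write_begin()/irq_time_write_end() are meant to bracket updates so irq_time_read() never observes a half-written value. The function name and its parameters are assumptions:

	/* Illustrative writer; the in-tree update path is not shown here. */
	static void example_account_irq_time(u64 delta_ns, bool hardirq)
	{
		irq_time_write_begin();
		if (hardirq)
			__this_cpu_add(cpu_hardirq_time, delta_ns);
		else
			__this_cpu_add(cpu_softirq_time, delta_ns);
		irq_time_write_end();
	}
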
diff --git a/kernel/signal.c b/kernel/signal.c index be4f856d52f8..2c681f11b7d2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1971,13 +1971,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why) | |||
1971 | void ptrace_notify(int exit_code) | 1971 | void ptrace_notify(int exit_code) |
1972 | { | 1972 | { |
1973 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | 1973 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
1974 | if (unlikely(current->task_works)) { | 1974 | if (unlikely(current->task_works)) |
1975 | if (test_and_clear_ti_thread_flag(current_thread_info(), | 1975 | task_work_run(); |
1976 | TIF_NOTIFY_RESUME)) { | ||
1977 | smp_mb__after_clear_bit(); | ||
1978 | task_work_run(); | ||
1979 | } | ||
1980 | } | ||
1981 | 1976 | ||
1982 | spin_lock_irq(¤t->sighand->siglock); | 1977 | spin_lock_irq(¤t->sighand->siglock); |
1983 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); | 1978 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
@@ -2198,13 +2193,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, | |||
2198 | struct signal_struct *signal = current->signal; | 2193 | struct signal_struct *signal = current->signal; |
2199 | int signr; | 2194 | int signr; |
2200 | 2195 | ||
2201 | if (unlikely(current->task_works)) { | 2196 | if (unlikely(current->task_works)) |
2202 | if (test_and_clear_ti_thread_flag(current_thread_info(), | 2197 | task_work_run(); |
2203 | TIF_NOTIFY_RESUME)) { | ||
2204 | smp_mb__after_clear_bit(); | ||
2205 | task_work_run(); | ||
2206 | } | ||
2207 | } | ||
2208 | 2198 | ||
2209 | if (unlikely(uprobe_deny_signal())) | 2199 | if (unlikely(uprobe_deny_signal())) |
2210 | return 0; | 2200 | return 0; |
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 98f60c5caa1b..d6c5fc054242 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
@@ -1,14 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * Common SMP CPU bringup/teardown functions | 2 | * Common SMP CPU bringup/teardown functions |
3 | */ | 3 | */ |
4 | #include <linux/cpu.h> | ||
4 | #include <linux/err.h> | 5 | #include <linux/err.h> |
5 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
6 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/list.h> | ||
9 | #include <linux/slab.h> | ||
7 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/export.h> | ||
8 | #include <linux/percpu.h> | 12 | #include <linux/percpu.h> |
13 | #include <linux/kthread.h> | ||
14 | #include <linux/smpboot.h> | ||
9 | 15 | ||
10 | #include "smpboot.h" | 16 | #include "smpboot.h" |
11 | 17 | ||
18 | #ifdef CONFIG_SMP | ||
19 | |||
12 | #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD | 20 | #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD |
13 | /* | 21 | /* |
14 | * For the hotplug case we keep the task structs around and reuse | 22 | * For the hotplug case we keep the task structs around and reuse |
@@ -65,3 +73,228 @@ void __init idle_threads_init(void) | |||
65 | } | 73 | } |
66 | } | 74 | } |
67 | #endif | 75 | #endif |
76 | |||
77 | #endif /* #ifdef CONFIG_SMP */ | ||
78 | |||
79 | static LIST_HEAD(hotplug_threads); | ||
80 | static DEFINE_MUTEX(smpboot_threads_lock); | ||
81 | |||
82 | struct smpboot_thread_data { | ||
83 | unsigned int cpu; | ||
84 | unsigned int status; | ||
85 | struct smp_hotplug_thread *ht; | ||
86 | }; | ||
87 | |||
88 | enum { | ||
89 | HP_THREAD_NONE = 0, | ||
90 | HP_THREAD_ACTIVE, | ||
91 | HP_THREAD_PARKED, | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * smpboot_thread_fn - percpu hotplug thread loop function | ||
96 | * @data: thread data pointer | ||
97 | * | ||
98 | * Checks for thread stop and park conditions. Calls the necessary | ||
99 | * setup, cleanup, park and unpark functions for the registered | ||
100 | * thread. | ||
101 | * | ||
102 | * Returns 1 when the thread should exit, 0 otherwise. | ||
103 | */ | ||
104 | static int smpboot_thread_fn(void *data) | ||
105 | { | ||
106 | struct smpboot_thread_data *td = data; | ||
107 | struct smp_hotplug_thread *ht = td->ht; | ||
108 | |||
109 | while (1) { | ||
110 | set_current_state(TASK_INTERRUPTIBLE); | ||
111 | preempt_disable(); | ||
112 | if (kthread_should_stop()) { | ||
113 | set_current_state(TASK_RUNNING); | ||
114 | preempt_enable(); | ||
115 | if (ht->cleanup) | ||
116 | ht->cleanup(td->cpu, cpu_online(td->cpu)); | ||
117 | kfree(td); | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | if (kthread_should_park()) { | ||
122 | __set_current_state(TASK_RUNNING); | ||
123 | preempt_enable(); | ||
124 | if (ht->park && td->status == HP_THREAD_ACTIVE) { | ||
125 | BUG_ON(td->cpu != smp_processor_id()); | ||
126 | ht->park(td->cpu); | ||
127 | td->status = HP_THREAD_PARKED; | ||
128 | } | ||
129 | kthread_parkme(); | ||
130 | /* We might have been woken for stop */ | ||
131 | continue; | ||
132 | } | ||
133 | |||
134 | BUG_ON(td->cpu != smp_processor_id()); | ||
135 | |||
136 | /* Check for state change setup */ | ||
137 | switch (td->status) { | ||
138 | case HP_THREAD_NONE: | ||
139 | preempt_enable(); | ||
140 | if (ht->setup) | ||
141 | ht->setup(td->cpu); | ||
142 | td->status = HP_THREAD_ACTIVE; | ||
143 | preempt_disable(); | ||
144 | break; | ||
145 | case HP_THREAD_PARKED: | ||
146 | preempt_enable(); | ||
147 | if (ht->unpark) | ||
148 | ht->unpark(td->cpu); | ||
149 | td->status = HP_THREAD_ACTIVE; | ||
150 | preempt_disable(); | ||
151 | break; | ||
152 | } | ||
153 | |||
154 | if (!ht->thread_should_run(td->cpu)) { | ||
155 | preempt_enable(); | ||
156 | schedule(); | ||
157 | } else { | ||
158 | set_current_state(TASK_RUNNING); | ||
159 | preempt_enable(); | ||
160 | ht->thread_fn(td->cpu); | ||
161 | } | ||
162 | } | ||
163 | } | ||
164 | |||
165 | static int | ||
166 | __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) | ||
167 | { | ||
168 | struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); | ||
169 | struct smpboot_thread_data *td; | ||
170 | |||
171 | if (tsk) | ||
172 | return 0; | ||
173 | |||
174 | td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu)); | ||
175 | if (!td) | ||
176 | return -ENOMEM; | ||
177 | td->cpu = cpu; | ||
178 | td->ht = ht; | ||
179 | |||
180 | tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu, | ||
181 | ht->thread_comm); | ||
182 | if (IS_ERR(tsk)) { | ||
183 | kfree(td); | ||
184 | return PTR_ERR(tsk); | ||
185 | } | ||
186 | |||
187 | get_task_struct(tsk); | ||
188 | *per_cpu_ptr(ht->store, cpu) = tsk; | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | int smpboot_create_threads(unsigned int cpu) | ||
193 | { | ||
194 | struct smp_hotplug_thread *cur; | ||
195 | int ret = 0; | ||
196 | |||
197 | mutex_lock(&smpboot_threads_lock); | ||
198 | list_for_each_entry(cur, &hotplug_threads, list) { | ||
199 | ret = __smpboot_create_thread(cur, cpu); | ||
200 | if (ret) | ||
201 | break; | ||
202 | } | ||
203 | mutex_unlock(&smpboot_threads_lock); | ||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu) | ||
208 | { | ||
209 | struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); | ||
210 | |||
211 | kthread_unpark(tsk); | ||
212 | } | ||
213 | |||
214 | void smpboot_unpark_threads(unsigned int cpu) | ||
215 | { | ||
216 | struct smp_hotplug_thread *cur; | ||
217 | |||
218 | mutex_lock(&smpboot_threads_lock); | ||
219 | list_for_each_entry(cur, &hotplug_threads, list) | ||
220 | smpboot_unpark_thread(cur, cpu); | ||
221 | mutex_unlock(&smpboot_threads_lock); | ||
222 | } | ||
223 | |||
224 | static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu) | ||
225 | { | ||
226 | struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); | ||
227 | |||
228 | if (tsk) | ||
229 | kthread_park(tsk); | ||
230 | } | ||
231 | |||
232 | void smpboot_park_threads(unsigned int cpu) | ||
233 | { | ||
234 | struct smp_hotplug_thread *cur; | ||
235 | |||
236 | mutex_lock(&smpboot_threads_lock); | ||
237 | list_for_each_entry_reverse(cur, &hotplug_threads, list) | ||
238 | smpboot_park_thread(cur, cpu); | ||
239 | mutex_unlock(&smpboot_threads_lock); | ||
240 | } | ||
241 | |||
242 | static void smpboot_destroy_threads(struct smp_hotplug_thread *ht) | ||
243 | { | ||
244 | unsigned int cpu; | ||
245 | |||
246 | /* We need to destroy also the parked threads of offline cpus */ | ||
247 | for_each_possible_cpu(cpu) { | ||
248 | struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); | ||
249 | |||
250 | if (tsk) { | ||
251 | kthread_stop(tsk); | ||
252 | put_task_struct(tsk); | ||
253 | *per_cpu_ptr(ht->store, cpu) = NULL; | ||
254 | } | ||
255 | } | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug | ||
260 | * @plug_thread: Hotplug thread descriptor | ||
261 | * | ||
262 | * Creates and starts the threads on all online cpus. | ||
263 | */ | ||
264 | int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) | ||
265 | { | ||
266 | unsigned int cpu; | ||
267 | int ret = 0; | ||
268 | |||
269 | mutex_lock(&smpboot_threads_lock); | ||
270 | for_each_online_cpu(cpu) { | ||
271 | ret = __smpboot_create_thread(plug_thread, cpu); | ||
272 | if (ret) { | ||
273 | smpboot_destroy_threads(plug_thread); | ||
274 | goto out; | ||
275 | } | ||
276 | smpboot_unpark_thread(plug_thread, cpu); | ||
277 | } | ||
278 | list_add(&plug_thread->list, &hotplug_threads); | ||
279 | out: | ||
280 | mutex_unlock(&smpboot_threads_lock); | ||
281 | return ret; | ||
282 | } | ||
283 | EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread); | ||
284 | |||
285 | /** | ||
286 | * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug | ||
287 | * @plug_thread: Hotplug thread descriptor | ||
288 | * | ||
289 | * Stops all threads on all possible cpus. | ||
290 | */ | ||
291 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread) | ||
292 | { | ||
293 | get_online_cpus(); | ||
294 | mutex_lock(&smpboot_threads_lock); | ||
295 | list_del(&plug_thread->list); | ||
296 | smpboot_destroy_threads(plug_thread); | ||
297 | mutex_unlock(&smpboot_threads_lock); | ||
298 | put_online_cpus(); | ||
299 | } | ||
300 | EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread); | ||
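
The kernel/softirq.c conversion later in this diff is the first real user of this interface; for reference, a minimal sketch of how a subsystem would register its own per-cpu hotplug thread. Everything prefixed "example" is illustrative; only struct smp_hotplug_thread, its fields, and smpboot_register_percpu_thread() come from the code above:

	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/smpboot.h>

	static DEFINE_PER_CPU(struct task_struct *, example_thread);
	static DEFINE_PER_CPU(bool, example_has_work);

	static int example_should_run(unsigned int cpu)
	{
		return per_cpu(example_has_work, cpu);
	}

	static void example_thread_fn(unsigned int cpu)
	{
		per_cpu(example_has_work, cpu) = false;
		/* ... process the per-cpu work here ... */
	}

	static struct smp_hotplug_thread example_threads = {
		.store			= &example_thread,
		.thread_should_run	= example_should_run,
		.thread_fn		= example_thread_fn,
		.thread_comm		= "example/%u",
	};

	static int __init example_threads_init(void)
	{
		/* Creates one kthread per online CPU, unparks it, and lets the
		 * smpboot core handle park/unpark across CPU hotplug events. */
		return smpboot_register_percpu_thread(&example_threads);
	}
	early_initcall(example_threads_init);

Tearing down with smpboot_unregister_percpu_thread() stops and frees the threads on all possible CPUs, parked ones included, as the destroy loop above shows.
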
diff --git a/kernel/smpboot.h b/kernel/smpboot.h index 6ef9433e1c70..72415a0eb955 100644 --- a/kernel/smpboot.h +++ b/kernel/smpboot.h | |||
@@ -13,4 +13,8 @@ static inline void idle_thread_set_boot_cpu(void) { } | |||
13 | static inline void idle_threads_init(void) { } | 13 | static inline void idle_threads_init(void) { } |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | int smpboot_create_threads(unsigned int cpu); | ||
17 | void smpboot_park_threads(unsigned int cpu); | ||
18 | void smpboot_unpark_threads(unsigned int cpu); | ||
19 | |||
16 | #endif | 20 | #endif |
diff --git a/kernel/softirq.c b/kernel/softirq.c index b73e681df09e..cc96bdc0c2c9 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/ftrace.h> | 24 | #include <linux/ftrace.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/smpboot.h> | ||
26 | #include <linux/tick.h> | 27 | #include <linux/tick.h> |
27 | 28 | ||
28 | #define CREATE_TRACE_POINTS | 29 | #define CREATE_TRACE_POINTS |
@@ -220,7 +221,7 @@ asmlinkage void __do_softirq(void) | |||
220 | current->flags &= ~PF_MEMALLOC; | 221 | current->flags &= ~PF_MEMALLOC; |
221 | 222 | ||
222 | pending = local_softirq_pending(); | 223 | pending = local_softirq_pending(); |
223 | account_system_vtime(current); | 224 | vtime_account(current); |
224 | 225 | ||
225 | __local_bh_disable((unsigned long)__builtin_return_address(0), | 226 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
226 | SOFTIRQ_OFFSET); | 227 | SOFTIRQ_OFFSET); |
@@ -271,7 +272,7 @@ restart: | |||
271 | 272 | ||
272 | lockdep_softirq_exit(); | 273 | lockdep_softirq_exit(); |
273 | 274 | ||
274 | account_system_vtime(current); | 275 | vtime_account(current); |
275 | __local_bh_enable(SOFTIRQ_OFFSET); | 276 | __local_bh_enable(SOFTIRQ_OFFSET); |
276 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); | 277 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); |
277 | } | 278 | } |
@@ -340,7 +341,7 @@ static inline void invoke_softirq(void) | |||
340 | */ | 341 | */ |
341 | void irq_exit(void) | 342 | void irq_exit(void) |
342 | { | 343 | { |
343 | account_system_vtime(current); | 344 | vtime_account(current); |
344 | trace_hardirq_exit(); | 345 | trace_hardirq_exit(); |
345 | sub_preempt_count(IRQ_EXIT_OFFSET); | 346 | sub_preempt_count(IRQ_EXIT_OFFSET); |
346 | if (!in_interrupt() && local_softirq_pending()) | 347 | if (!in_interrupt() && local_softirq_pending()) |
@@ -742,49 +743,22 @@ void __init softirq_init(void) | |||
742 | open_softirq(HI_SOFTIRQ, tasklet_hi_action); | 743 | open_softirq(HI_SOFTIRQ, tasklet_hi_action); |
743 | } | 744 | } |
744 | 745 | ||
745 | static int run_ksoftirqd(void * __bind_cpu) | 746 | static int ksoftirqd_should_run(unsigned int cpu) |
746 | { | 747 | { |
747 | set_current_state(TASK_INTERRUPTIBLE); | 748 | return local_softirq_pending(); |
748 | 749 | } | |
749 | while (!kthread_should_stop()) { | ||
750 | preempt_disable(); | ||
751 | if (!local_softirq_pending()) { | ||
752 | schedule_preempt_disabled(); | ||
753 | } | ||
754 | |||
755 | __set_current_state(TASK_RUNNING); | ||
756 | |||
757 | while (local_softirq_pending()) { | ||
758 | /* Preempt disable stops cpu going offline. | ||
759 | If already offline, we'll be on wrong CPU: | ||
760 | don't process */ | ||
761 | if (cpu_is_offline((long)__bind_cpu)) | ||
762 | goto wait_to_die; | ||
763 | local_irq_disable(); | ||
764 | if (local_softirq_pending()) | ||
765 | __do_softirq(); | ||
766 | local_irq_enable(); | ||
767 | sched_preempt_enable_no_resched(); | ||
768 | cond_resched(); | ||
769 | preempt_disable(); | ||
770 | rcu_note_context_switch((long)__bind_cpu); | ||
771 | } | ||
772 | preempt_enable(); | ||
773 | set_current_state(TASK_INTERRUPTIBLE); | ||
774 | } | ||
775 | __set_current_state(TASK_RUNNING); | ||
776 | return 0; | ||
777 | 750 | ||
778 | wait_to_die: | 751 | static void run_ksoftirqd(unsigned int cpu) |
779 | preempt_enable(); | 752 | { |
780 | /* Wait for kthread_stop */ | 753 | local_irq_disable(); |
781 | set_current_state(TASK_INTERRUPTIBLE); | 754 | if (local_softirq_pending()) { |
782 | while (!kthread_should_stop()) { | 755 | __do_softirq(); |
783 | schedule(); | 756 | rcu_note_context_switch(cpu); |
784 | set_current_state(TASK_INTERRUPTIBLE); | 757 | local_irq_enable(); |
758 | cond_resched(); | ||
759 | return; | ||
785 | } | 760 | } |
786 | __set_current_state(TASK_RUNNING); | 761 | local_irq_enable(); |
787 | return 0; | ||
788 | } | 762 | } |
789 | 763 | ||
790 | #ifdef CONFIG_HOTPLUG_CPU | 764 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -850,50 +824,14 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, | |||
850 | unsigned long action, | 824 | unsigned long action, |
851 | void *hcpu) | 825 | void *hcpu) |
852 | { | 826 | { |
853 | int hotcpu = (unsigned long)hcpu; | ||
854 | struct task_struct *p; | ||
855 | |||
856 | switch (action) { | 827 | switch (action) { |
857 | case CPU_UP_PREPARE: | ||
858 | case CPU_UP_PREPARE_FROZEN: | ||
859 | p = kthread_create_on_node(run_ksoftirqd, | ||
860 | hcpu, | ||
861 | cpu_to_node(hotcpu), | ||
862 | "ksoftirqd/%d", hotcpu); | ||
863 | if (IS_ERR(p)) { | ||
864 | printk("ksoftirqd for %i failed\n", hotcpu); | ||
865 | return notifier_from_errno(PTR_ERR(p)); | ||
866 | } | ||
867 | kthread_bind(p, hotcpu); | ||
868 | per_cpu(ksoftirqd, hotcpu) = p; | ||
869 | break; | ||
870 | case CPU_ONLINE: | ||
871 | case CPU_ONLINE_FROZEN: | ||
872 | wake_up_process(per_cpu(ksoftirqd, hotcpu)); | ||
873 | break; | ||
874 | #ifdef CONFIG_HOTPLUG_CPU | 828 | #ifdef CONFIG_HOTPLUG_CPU |
875 | case CPU_UP_CANCELED: | ||
876 | case CPU_UP_CANCELED_FROZEN: | ||
877 | if (!per_cpu(ksoftirqd, hotcpu)) | ||
878 | break; | ||
879 | /* Unbind so it can run. Fall thru. */ | ||
880 | kthread_bind(per_cpu(ksoftirqd, hotcpu), | ||
881 | cpumask_any(cpu_online_mask)); | ||
882 | case CPU_DEAD: | 829 | case CPU_DEAD: |
883 | case CPU_DEAD_FROZEN: { | 830 | case CPU_DEAD_FROZEN: |
884 | static const struct sched_param param = { | 831 | takeover_tasklets((unsigned long)hcpu); |
885 | .sched_priority = MAX_RT_PRIO-1 | ||
886 | }; | ||
887 | |||
888 | p = per_cpu(ksoftirqd, hotcpu); | ||
889 | per_cpu(ksoftirqd, hotcpu) = NULL; | ||
890 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | ||
891 | kthread_stop(p); | ||
892 | takeover_tasklets(hotcpu); | ||
893 | break; | 832 | break; |
894 | } | ||
895 | #endif /* CONFIG_HOTPLUG_CPU */ | 833 | #endif /* CONFIG_HOTPLUG_CPU */ |
896 | } | 834 | } |
897 | return NOTIFY_OK; | 835 | return NOTIFY_OK; |
898 | } | 836 | } |
899 | 837 | ||
@@ -901,14 +839,19 @@ static struct notifier_block __cpuinitdata cpu_nfb = { | |||
901 | .notifier_call = cpu_callback | 839 | .notifier_call = cpu_callback |
902 | }; | 840 | }; |
903 | 841 | ||
842 | static struct smp_hotplug_thread softirq_threads = { | ||
843 | .store = &ksoftirqd, | ||
844 | .thread_should_run = ksoftirqd_should_run, | ||
845 | .thread_fn = run_ksoftirqd, | ||
846 | .thread_comm = "ksoftirqd/%u", | ||
847 | }; | ||
848 | |||
904 | static __init int spawn_ksoftirqd(void) | 849 | static __init int spawn_ksoftirqd(void) |
905 | { | 850 | { |
906 | void *cpu = (void *)(long)smp_processor_id(); | ||
907 | int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | ||
908 | |||
909 | BUG_ON(err != NOTIFY_OK); | ||
910 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | ||
911 | register_cpu_notifier(&cpu_nfb); | 851 | register_cpu_notifier(&cpu_nfb); |
852 | |||
853 | BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); | ||
854 | |||
912 | return 0; | 855 | return 0; |
913 | } | 856 | } |
914 | early_initcall(spawn_ksoftirqd); | 857 | early_initcall(spawn_ksoftirqd); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 87174ef59161..84c76a34e41c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -307,7 +307,7 @@ static struct ctl_table kern_table[] = { | |||
307 | .extra2 = &max_sched_tunable_scaling, | 307 | .extra2 = &max_sched_tunable_scaling, |
308 | }, | 308 | }, |
309 | { | 309 | { |
310 | .procname = "sched_migration_cost", | 310 | .procname = "sched_migration_cost_ns", |
311 | .data = &sysctl_sched_migration_cost, | 311 | .data = &sysctl_sched_migration_cost, |
312 | .maxlen = sizeof(unsigned int), | 312 | .maxlen = sizeof(unsigned int), |
313 | .mode = 0644, | 313 | .mode = 0644, |
@@ -321,14 +321,14 @@ static struct ctl_table kern_table[] = { | |||
321 | .proc_handler = proc_dointvec, | 321 | .proc_handler = proc_dointvec, |
322 | }, | 322 | }, |
323 | { | 323 | { |
324 | .procname = "sched_time_avg", | 324 | .procname = "sched_time_avg_ms", |
325 | .data = &sysctl_sched_time_avg, | 325 | .data = &sysctl_sched_time_avg, |
326 | .maxlen = sizeof(unsigned int), | 326 | .maxlen = sizeof(unsigned int), |
327 | .mode = 0644, | 327 | .mode = 0644, |
328 | .proc_handler = proc_dointvec, | 328 | .proc_handler = proc_dointvec, |
329 | }, | 329 | }, |
330 | { | 330 | { |
331 | .procname = "sched_shares_window", | 331 | .procname = "sched_shares_window_ns", |
332 | .data = &sysctl_sched_shares_window, | 332 | .data = &sysctl_sched_shares_window, |
333 | .maxlen = sizeof(unsigned int), | 333 | .maxlen = sizeof(unsigned int), |
334 | .mode = 0644, | 334 | .mode = 0644, |
@@ -1544,7 +1544,7 @@ static struct ctl_table fs_table[] = { | |||
1544 | 1544 | ||
1545 | static struct ctl_table debug_table[] = { | 1545 | static struct ctl_table debug_table[] = { |
1546 | #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \ | 1546 | #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \ |
1547 | defined(CONFIG_S390) || defined(CONFIG_TILE) | 1547 | defined(CONFIG_S390) || defined(CONFIG_TILE) || defined(CONFIG_ARM64) |
1548 | { | 1548 | { |
1549 | .procname = "exception-trace", | 1549 | .procname = "exception-trace", |
1550 | .data = &show_unhandled_signals, | 1550 | .data = &show_unhandled_signals, |
diff --git a/kernel/task_work.c b/kernel/task_work.c index d320d44903bd..65bd3c92d6f3 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c | |||
@@ -2,26 +2,20 @@ | |||
2 | #include <linux/task_work.h> | 2 | #include <linux/task_work.h> |
3 | #include <linux/tracehook.h> | 3 | #include <linux/tracehook.h> |
4 | 4 | ||
5 | static struct callback_head work_exited; /* all we need is ->next == NULL */ | ||
6 | |||
5 | int | 7 | int |
6 | task_work_add(struct task_struct *task, struct callback_head *twork, bool notify) | 8 | task_work_add(struct task_struct *task, struct callback_head *work, bool notify) |
7 | { | 9 | { |
8 | struct callback_head *last, *first; | 10 | struct callback_head *head; |
9 | unsigned long flags; | ||
10 | 11 | ||
11 | /* | 12 | do { |
12 | * Not inserting the new work if the task has already passed | 13 | head = ACCESS_ONCE(task->task_works); |
13 | * exit_task_work() is the responisbility of callers. | 14 | if (unlikely(head == &work_exited)) |
14 | */ | 15 | return -ESRCH; |
15 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 16 | work->next = head; |
16 | last = task->task_works; | 17 | } while (cmpxchg(&task->task_works, head, work) != head); |
17 | first = last ? last->next : twork; | ||
18 | twork->next = first; | ||
19 | if (last) | ||
20 | last->next = twork; | ||
21 | task->task_works = twork; | ||
22 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); | ||
23 | 18 | ||
24 | /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */ | ||
25 | if (notify) | 19 | if (notify) |
26 | set_notify_resume(task); | 20 | set_notify_resume(task); |
27 | return 0; | 21 | return 0; |
@@ -30,52 +24,69 @@ task_work_add(struct task_struct *task, struct callback_head *twork, bool notify | |||
30 | struct callback_head * | 24 | struct callback_head * |
31 | task_work_cancel(struct task_struct *task, task_work_func_t func) | 25 | task_work_cancel(struct task_struct *task, task_work_func_t func) |
32 | { | 26 | { |
27 | struct callback_head **pprev = &task->task_works; | ||
28 | struct callback_head *work = NULL; | ||
33 | unsigned long flags; | 29 | unsigned long flags; |
34 | struct callback_head *last, *res = NULL; | 30 | /* |
35 | 31 | * If cmpxchg() fails we continue without updating pprev. | |
32 | * Either we raced with task_work_add() which added the | ||
33 | * new entry before this work, we will find it again. Or | ||
34 | * we raced with task_work_run(), *pprev == NULL/exited. | ||
35 | */ | ||
36 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 36 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
37 | last = task->task_works; | 37 | while ((work = ACCESS_ONCE(*pprev))) { |
38 | if (last) { | 38 | read_barrier_depends(); |
39 | struct callback_head *q = last, *p = q->next; | 39 | if (work->func != func) |
40 | while (1) { | 40 | pprev = &work->next; |
41 | if (p->func == func) { | 41 | else if (cmpxchg(pprev, work, work->next) == work) |
42 | q->next = p->next; | 42 | break; |
43 | if (p == last) | ||
44 | task->task_works = q == p ? NULL : q; | ||
45 | res = p; | ||
46 | break; | ||
47 | } | ||
48 | if (p == last) | ||
49 | break; | ||
50 | q = p; | ||
51 | p = q->next; | ||
52 | } | ||
53 | } | 43 | } |
54 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); | 44 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
55 | return res; | 45 | |
46 | return work; | ||
56 | } | 47 | } |
57 | 48 | ||
58 | void task_work_run(void) | 49 | void task_work_run(void) |
59 | { | 50 | { |
60 | struct task_struct *task = current; | 51 | struct task_struct *task = current; |
61 | struct callback_head *p, *q; | 52 | struct callback_head *work, *head, *next; |
53 | |||
54 | for (;;) { | ||
55 | /* | ||
56 | * work->func() can do task_work_add(), do not set | ||
57 | * work_exited unless the list is empty. | ||
58 | */ | ||
59 | do { | ||
60 | work = ACCESS_ONCE(task->task_works); | ||
61 | head = !work && (task->flags & PF_EXITING) ? | ||
62 | &work_exited : NULL; | ||
63 | } while (cmpxchg(&task->task_works, work, head) != work); | ||
62 | 64 | ||
63 | while (1) { | 65 | if (!work) |
64 | raw_spin_lock_irq(&task->pi_lock); | 66 | break; |
65 | p = task->task_works; | 67 | /* |
66 | task->task_works = NULL; | 68 | * Synchronize with task_work_cancel(). It can't remove |
67 | raw_spin_unlock_irq(&task->pi_lock); | 69 | * the first entry == work, cmpxchg(task_works) should |
70 | * fail, but it can play with *work and other entries. | ||
71 | */ | ||
72 | raw_spin_unlock_wait(&task->pi_lock); | ||
73 | smp_mb(); | ||
68 | 74 | ||
69 | if (unlikely(!p)) | 75 | /* Reverse the list to run the works in fifo order */ |
70 | return; | 76 | head = NULL; |
77 | do { | ||
78 | next = work->next; | ||
79 | work->next = head; | ||
80 | head = work; | ||
81 | work = next; | ||
82 | } while (work); | ||
71 | 83 | ||
72 | q = p->next; /* head */ | 84 | work = head; |
73 | p->next = NULL; /* cut it */ | 85 | do { |
74 | while (q) { | 86 | next = work->next; |
75 | p = q->next; | 87 | work->func(work); |
76 | q->func(q); | 88 | work = next; |
77 | q = p; | ||
78 | cond_resched(); | 89 | cond_resched(); |
79 | } | 90 | } while (work); |
80 | } | 91 | } |
81 | } | 92 | } |
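
With the rework above, task_work_add() queues lock-free via cmpxchg() and fails with -ESRCH once the exiting task has installed work_exited, while task_work_run() reverses its snapshot of the list so callbacks still run in FIFO order. A hedged usage sketch; the request structure and all "example" names are illustrative, only struct callback_head and the task_work_* calls come from the code above:

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/slab.h>
	#include <linux/task_work.h>

	struct example_req {
		struct callback_head	work;
		int			payload;
	};

	static void example_req_complete(struct callback_head *cb)
	{
		struct example_req *req = container_of(cb, struct example_req, work);

		pr_info("completing request, payload %d\n", req->payload);
		kfree(req);
	}

	static int example_queue_to_task(struct task_struct *task, int payload)
	{
		struct example_req *req = kmalloc(sizeof(*req), GFP_KERNEL);
		int ret;

		if (!req)
			return -ENOMEM;
		req->payload = payload;
		req->work.func = example_req_complete;

		/* notify=true also sets TIF_NOTIFY_RESUME so the target runs the
		 * work on its way back to user space; -ESRCH means the task has
		 * already passed exit_task_work(). */
		ret = task_work_add(task, &req->work, true);
		if (ret)
			kfree(req);
		return ret;
	}
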
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 024540f97f74..f423bdd035c2 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -372,7 +372,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | |||
372 | * the scheduler tick in nohz_restart_sched_tick. | 372 | * the scheduler tick in nohz_restart_sched_tick. |
373 | */ | 373 | */ |
374 | if (!ts->tick_stopped) { | 374 | if (!ts->tick_stopped) { |
375 | select_nohz_load_balancer(1); | 375 | nohz_balance_enter_idle(cpu); |
376 | calc_load_enter_idle(); | 376 | calc_load_enter_idle(); |
377 | 377 | ||
378 | ts->last_tick = hrtimer_get_expires(&ts->sched_timer); | 378 | ts->last_tick = hrtimer_get_expires(&ts->sched_timer); |
@@ -436,7 +436,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) | |||
436 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { | 436 | if (unlikely(local_softirq_pending() && cpu_online(cpu))) { |
437 | static int ratelimit; | 437 | static int ratelimit; |
438 | 438 | ||
439 | if (ratelimit < 10) { | 439 | if (ratelimit < 10 && |
440 | (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { | ||
440 | printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", | 441 | printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
441 | (unsigned int) local_softirq_pending()); | 442 | (unsigned int) local_softirq_pending()); |
442 | ratelimit++; | 443 | ratelimit++; |
@@ -569,10 +570,10 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) | |||
569 | static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) | 570 | static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) |
570 | { | 571 | { |
571 | /* Update jiffies first */ | 572 | /* Update jiffies first */ |
572 | select_nohz_load_balancer(0); | ||
573 | tick_do_update_jiffies64(now); | 573 | tick_do_update_jiffies64(now); |
574 | update_cpu_load_nohz(); | 574 | update_cpu_load_nohz(); |
575 | 575 | ||
576 | calc_load_exit_idle(); | ||
576 | touch_softlockup_watchdog(); | 577 | touch_softlockup_watchdog(); |
577 | /* | 578 | /* |
578 | * Cancel the scheduled timer and restore the tick | 579 | * Cancel the scheduled timer and restore the tick |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e16af197a2bc..d3b91e75cecd 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) | |||
115 | { | 115 | { |
116 | tk->xtime_sec += ts->tv_sec; | 116 | tk->xtime_sec += ts->tv_sec; |
117 | tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; | 117 | tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; |
118 | tk_normalize_xtime(tk); | ||
118 | } | 119 | } |
119 | 120 | ||
120 | static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) | 121 | static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) |
@@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk) | |||
276 | tk->xtime_nsec += cycle_delta * tk->mult; | 277 | tk->xtime_nsec += cycle_delta * tk->mult; |
277 | 278 | ||
278 | /* If arch requires, add in gettimeoffset() */ | 279 | /* If arch requires, add in gettimeoffset() */ |
279 | tk->xtime_nsec += arch_gettimeoffset() << tk->shift; | 280 | tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift; |
280 | 281 | ||
281 | tk_normalize_xtime(tk); | 282 | tk_normalize_xtime(tk); |
282 | 283 | ||
@@ -302,10 +303,11 @@ void getnstimeofday(struct timespec *ts) | |||
302 | seq = read_seqbegin(&tk->lock); | 303 | seq = read_seqbegin(&tk->lock); |
303 | 304 | ||
304 | ts->tv_sec = tk->xtime_sec; | 305 | ts->tv_sec = tk->xtime_sec; |
305 | ts->tv_nsec = timekeeping_get_ns(tk); | 306 | nsecs = timekeeping_get_ns(tk); |
306 | 307 | ||
307 | } while (read_seqretry(&tk->lock, seq)); | 308 | } while (read_seqretry(&tk->lock, seq)); |
308 | 309 | ||
310 | ts->tv_nsec = 0; | ||
309 | timespec_add_ns(ts, nsecs); | 311 | timespec_add_ns(ts, nsecs); |
310 | } | 312 | } |
311 | EXPORT_SYMBOL(getnstimeofday); | 313 | EXPORT_SYMBOL(getnstimeofday); |
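
The fix above keeps the sub-second part in a 64-bit nanosecond value and leaves normalization to timespec_add_ns(), so a reading of a full second or more can no longer leak into tv_nsec. For illustration, the normalization this relies on:

	/* timespec_add_ns() folds whole seconds out of the nanosecond part: */
	struct timespec ts = { .tv_sec = 10, .tv_nsec = 0 };

	timespec_add_ns(&ts, 1500000000ULL);	/* add 1.5 s worth of ns */
	/* ts is now { .tv_sec = 11, .tv_nsec = 500000000 } */
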
@@ -344,6 +346,7 @@ void ktime_get_ts(struct timespec *ts) | |||
344 | { | 346 | { |
345 | struct timekeeper *tk = &timekeeper; | 347 | struct timekeeper *tk = &timekeeper; |
346 | struct timespec tomono; | 348 | struct timespec tomono; |
349 | s64 nsec; | ||
347 | unsigned int seq; | 350 | unsigned int seq; |
348 | 351 | ||
349 | WARN_ON(timekeeping_suspended); | 352 | WARN_ON(timekeeping_suspended); |
@@ -351,13 +354,14 @@ void ktime_get_ts(struct timespec *ts) | |||
351 | do { | 354 | do { |
352 | seq = read_seqbegin(&tk->lock); | 355 | seq = read_seqbegin(&tk->lock); |
353 | ts->tv_sec = tk->xtime_sec; | 356 | ts->tv_sec = tk->xtime_sec; |
354 | ts->tv_nsec = timekeeping_get_ns(tk); | 357 | nsec = timekeeping_get_ns(tk); |
355 | tomono = tk->wall_to_monotonic; | 358 | tomono = tk->wall_to_monotonic; |
356 | 359 | ||
357 | } while (read_seqretry(&tk->lock, seq)); | 360 | } while (read_seqretry(&tk->lock, seq)); |
358 | 361 | ||
359 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, | 362 | ts->tv_sec += tomono.tv_sec; |
360 | ts->tv_nsec + tomono.tv_nsec); | 363 | ts->tv_nsec = 0; |
364 | timespec_add_ns(ts, nsec + tomono.tv_nsec); | ||
361 | } | 365 | } |
362 | EXPORT_SYMBOL_GPL(ktime_get_ts); | 366 | EXPORT_SYMBOL_GPL(ktime_get_ts); |
363 | 367 | ||
@@ -427,7 +431,7 @@ int do_settimeofday(const struct timespec *tv) | |||
427 | struct timespec ts_delta, xt; | 431 | struct timespec ts_delta, xt; |
428 | unsigned long flags; | 432 | unsigned long flags; |
429 | 433 | ||
430 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | 434 | if (!timespec_valid_strict(tv)) |
431 | return -EINVAL; | 435 | return -EINVAL; |
432 | 436 | ||
433 | write_seqlock_irqsave(&tk->lock, flags); | 437 | write_seqlock_irqsave(&tk->lock, flags); |
@@ -463,6 +467,8 @@ int timekeeping_inject_offset(struct timespec *ts) | |||
463 | { | 467 | { |
464 | struct timekeeper *tk = &timekeeper; | 468 | struct timekeeper *tk = &timekeeper; |
465 | unsigned long flags; | 469 | unsigned long flags; |
470 | struct timespec tmp; | ||
471 | int ret = 0; | ||
466 | 472 | ||
467 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) | 473 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
468 | return -EINVAL; | 474 | return -EINVAL; |
@@ -471,10 +477,17 @@ int timekeeping_inject_offset(struct timespec *ts) | |||
471 | 477 | ||
472 | timekeeping_forward_now(tk); | 478 | timekeeping_forward_now(tk); |
473 | 479 | ||
480 | /* Make sure the proposed value is valid */ | ||
481 | tmp = timespec_add(tk_xtime(tk), *ts); | ||
482 | if (!timespec_valid_strict(&tmp)) { | ||
483 | ret = -EINVAL; | ||
484 | goto error; | ||
485 | } | ||
474 | 486 | ||
475 | tk_xtime_add(tk, ts); | 487 | tk_xtime_add(tk, ts); |
476 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); | 488 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); |
477 | 489 | ||
490 | error: /* even if we error out, we forwarded the time, so call update */ | ||
478 | timekeeping_update(tk, true); | 491 | timekeeping_update(tk, true); |
479 | 492 | ||
480 | write_sequnlock_irqrestore(&tk->lock, flags); | 493 | write_sequnlock_irqrestore(&tk->lock, flags); |
@@ -482,7 +495,7 @@ int timekeeping_inject_offset(struct timespec *ts) | |||
482 | /* signal hrtimers about time change */ | 495 | /* signal hrtimers about time change */ |
483 | clock_was_set(); | 496 | clock_was_set(); |
484 | 497 | ||
485 | return 0; | 498 | return ret; |
486 | } | 499 | } |
487 | EXPORT_SYMBOL(timekeeping_inject_offset); | 500 | EXPORT_SYMBOL(timekeeping_inject_offset); |
488 | 501 | ||
@@ -649,7 +662,20 @@ void __init timekeeping_init(void) | |||
649 | struct timespec now, boot, tmp; | 662 | struct timespec now, boot, tmp; |
650 | 663 | ||
651 | read_persistent_clock(&now); | 664 | read_persistent_clock(&now); |
665 | if (!timespec_valid_strict(&now)) { | ||
666 | pr_warn("WARNING: Persistent clock returned invalid value!\n" | ||
667 | " Check your CMOS/BIOS settings.\n"); | ||
668 | now.tv_sec = 0; | ||
669 | now.tv_nsec = 0; | ||
670 | } | ||
671 | |||
652 | read_boot_clock(&boot); | 672 | read_boot_clock(&boot); |
673 | if (!timespec_valid_strict(&boot)) { | ||
674 | pr_warn("WARNING: Boot clock returned invalid value!\n" | ||
675 | " Check your CMOS/BIOS settings.\n"); | ||
676 | boot.tv_sec = 0; | ||
677 | boot.tv_nsec = 0; | ||
678 | } | ||
653 | 679 | ||
654 | seqlock_init(&tk->lock); | 680 | seqlock_init(&tk->lock); |
655 | 681 | ||
@@ -690,7 +716,7 @@ static struct timespec timekeeping_suspend_time; | |||
690 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | 716 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, |
691 | struct timespec *delta) | 717 | struct timespec *delta) |
692 | { | 718 | { |
693 | if (!timespec_valid(delta)) { | 719 | if (!timespec_valid_strict(delta)) { |
694 | printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " | 720 | printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " |
695 | "sleep delta value!\n"); | 721 | "sleep delta value!\n"); |
696 | return; | 722 | return; |
@@ -1129,6 +1155,10 @@ static void update_wall_time(void) | |||
1129 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; | 1155 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; |
1130 | #endif | 1156 | #endif |
1131 | 1157 | ||
1158 | /* Check if there's really nothing to do */ | ||
1159 | if (offset < tk->cycle_interval) | ||
1160 | goto out; | ||
1161 | |||
1132 | /* | 1162 | /* |
1133 | * With NO_HZ we may have to accumulate many cycle_intervals | 1163 | * With NO_HZ we may have to accumulate many cycle_intervals |
1134 | * (think "ticks") worth of time at once. To do this efficiently, | 1164 | * (think "ticks") worth of time at once. To do this efficiently, |
@@ -1161,9 +1191,9 @@ static void update_wall_time(void) | |||
1161 | * the vsyscall implementations are converted to use xtime_nsec | 1191 | * the vsyscall implementations are converted to use xtime_nsec |
1162 | * (shifted nanoseconds), this can be killed. | 1192 | * (shifted nanoseconds), this can be killed. |
1163 | */ | 1193 | */ |
1164 | remainder = tk->xtime_nsec & ((1 << tk->shift) - 1); | 1194 | remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1); |
1165 | tk->xtime_nsec -= remainder; | 1195 | tk->xtime_nsec -= remainder; |
1166 | tk->xtime_nsec += 1 << tk->shift; | 1196 | tk->xtime_nsec += 1ULL << tk->shift; |
1167 | tk->ntp_error += remainder << tk->ntp_error_shift; | 1197 | tk->ntp_error += remainder << tk->ntp_error_shift; |
1168 | 1198 | ||
1169 | /* | 1199 | /* |
@@ -1217,6 +1247,7 @@ void get_monotonic_boottime(struct timespec *ts) | |||
1217 | { | 1247 | { |
1218 | struct timekeeper *tk = &timekeeper; | 1248 | struct timekeeper *tk = &timekeeper; |
1219 | struct timespec tomono, sleep; | 1249 | struct timespec tomono, sleep; |
1250 | s64 nsec; | ||
1220 | unsigned int seq; | 1251 | unsigned int seq; |
1221 | 1252 | ||
1222 | WARN_ON(timekeeping_suspended); | 1253 | WARN_ON(timekeeping_suspended); |
@@ -1224,14 +1255,15 @@ void get_monotonic_boottime(struct timespec *ts) | |||
1224 | do { | 1255 | do { |
1225 | seq = read_seqbegin(&tk->lock); | 1256 | seq = read_seqbegin(&tk->lock); |
1226 | ts->tv_sec = tk->xtime_sec; | 1257 | ts->tv_sec = tk->xtime_sec; |
1227 | ts->tv_nsec = timekeeping_get_ns(tk); | 1258 | nsec = timekeeping_get_ns(tk); |
1228 | tomono = tk->wall_to_monotonic; | 1259 | tomono = tk->wall_to_monotonic; |
1229 | sleep = tk->total_sleep_time; | 1260 | sleep = tk->total_sleep_time; |
1230 | 1261 | ||
1231 | } while (read_seqretry(&tk->lock, seq)); | 1262 | } while (read_seqretry(&tk->lock, seq)); |
1232 | 1263 | ||
1233 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, | 1264 | ts->tv_sec += tomono.tv_sec + sleep.tv_sec; |
1234 | ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); | 1265 | ts->tv_nsec = 0; |
1266 | timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec); | ||
1235 | } | 1267 | } |
1236 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); | 1268 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); |
1237 | 1269 | ||
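
Several hunks above switch input checking to timespec_valid_strict(). The helper itself is defined in include/linux/time.h rather than in this file; as a rough, non-authoritative approximation, it rejects negative seconds, out-of-range nanoseconds, and second values too large to represent as a ktime_t:

	/* Approximation only; see include/linux/time.h for the real helper. */
	static inline bool example_timespec_valid_strict(const struct timespec *ts)
	{
		if (ts->tv_sec < 0)
			return false;
		if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
			return false;
		/* Reject values that would overflow the ktime_t range. */
		if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
			return false;
		return true;
	}
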
diff --git a/kernel/timer.c b/kernel/timer.c index 8c5e7b908c68..d5de1b2292aa 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -92,24 +92,25 @@ static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases; | |||
92 | /* Functions below help us manage 'deferrable' flag */ | 92 | /* Functions below help us manage 'deferrable' flag */ |
93 | static inline unsigned int tbase_get_deferrable(struct tvec_base *base) | 93 | static inline unsigned int tbase_get_deferrable(struct tvec_base *base) |
94 | { | 94 | { |
95 | return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); | 95 | return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE); |
96 | } | 96 | } |
97 | 97 | ||
98 | static inline struct tvec_base *tbase_get_base(struct tvec_base *base) | 98 | static inline unsigned int tbase_get_irqsafe(struct tvec_base *base) |
99 | { | 99 | { |
100 | return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); | 100 | return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE); |
101 | } | 101 | } |
102 | 102 | ||
103 | static inline void timer_set_deferrable(struct timer_list *timer) | 103 | static inline struct tvec_base *tbase_get_base(struct tvec_base *base) |
104 | { | 104 | { |
105 | timer->base = TBASE_MAKE_DEFERRED(timer->base); | 105 | return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK)); |
106 | } | 106 | } |
107 | 107 | ||
108 | static inline void | 108 | static inline void |
109 | timer_set_base(struct timer_list *timer, struct tvec_base *new_base) | 109 | timer_set_base(struct timer_list *timer, struct tvec_base *new_base) |
110 | { | 110 | { |
111 | timer->base = (struct tvec_base *)((unsigned long)(new_base) | | 111 | unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK; |
112 | tbase_get_deferrable(timer->base)); | 112 | |
113 | timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags); | ||
113 | } | 114 | } |
114 | 115 | ||
115 | static unsigned long round_jiffies_common(unsigned long j, int cpu, | 116 | static unsigned long round_jiffies_common(unsigned long j, int cpu, |
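
The helpers above work because struct tvec_base is aligned well enough that the low bits of a base pointer are always zero, which lets TIMER_DEFERRABLE and TIMER_IRQSAFE ride along inside timer->base; the init_timers() hunk further down adds a BUILD_BUG_ON() to enforce exactly that alignment assumption. A generic sketch of the pointer-tagging trick, with illustrative names:

	#define EXAMPLE_FLAG_MASK	0x3UL	/* as many flag bits as alignment allows */

	static inline void *example_tag_ptr(void *p, unsigned long flags)
	{
		WARN_ON((unsigned long)p & EXAMPLE_FLAG_MASK);	/* p must be aligned */
		return (void *)((unsigned long)p | (flags & EXAMPLE_FLAG_MASK));
	}

	static inline void *example_untag_ptr(void *tagged)
	{
		return (void *)((unsigned long)tagged & ~EXAMPLE_FLAG_MASK);
	}

	static inline unsigned long example_ptr_flags(void *tagged)
	{
		return (unsigned long)tagged & EXAMPLE_FLAG_MASK;
	}
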
@@ -563,16 +564,14 @@ static inline void debug_timer_assert_init(struct timer_list *timer) | |||
563 | debug_object_assert_init(timer, &timer_debug_descr); | 564 | debug_object_assert_init(timer, &timer_debug_descr); |
564 | } | 565 | } |
565 | 566 | ||
566 | static void __init_timer(struct timer_list *timer, | 567 | static void do_init_timer(struct timer_list *timer, unsigned int flags, |
567 | const char *name, | 568 | const char *name, struct lock_class_key *key); |
568 | struct lock_class_key *key); | ||
569 | 569 | ||
570 | void init_timer_on_stack_key(struct timer_list *timer, | 570 | void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags, |
571 | const char *name, | 571 | const char *name, struct lock_class_key *key) |
572 | struct lock_class_key *key) | ||
573 | { | 572 | { |
574 | debug_object_init_on_stack(timer, &timer_debug_descr); | 573 | debug_object_init_on_stack(timer, &timer_debug_descr); |
575 | __init_timer(timer, name, key); | 574 | do_init_timer(timer, flags, name, key); |
576 | } | 575 | } |
577 | EXPORT_SYMBOL_GPL(init_timer_on_stack_key); | 576 | EXPORT_SYMBOL_GPL(init_timer_on_stack_key); |
578 | 577 | ||
@@ -613,12 +612,13 @@ static inline void debug_assert_init(struct timer_list *timer) | |||
613 | debug_timer_assert_init(timer); | 612 | debug_timer_assert_init(timer); |
614 | } | 613 | } |
615 | 614 | ||
616 | static void __init_timer(struct timer_list *timer, | 615 | static void do_init_timer(struct timer_list *timer, unsigned int flags, |
617 | const char *name, | 616 | const char *name, struct lock_class_key *key) |
618 | struct lock_class_key *key) | ||
619 | { | 617 | { |
618 | struct tvec_base *base = __raw_get_cpu_var(tvec_bases); | ||
619 | |||
620 | timer->entry.next = NULL; | 620 | timer->entry.next = NULL; |
621 | timer->base = __raw_get_cpu_var(tvec_bases); | 621 | timer->base = (void *)((unsigned long)base | flags); |
622 | timer->slack = -1; | 622 | timer->slack = -1; |
623 | #ifdef CONFIG_TIMER_STATS | 623 | #ifdef CONFIG_TIMER_STATS |
624 | timer->start_site = NULL; | 624 | timer->start_site = NULL; |
@@ -628,22 +628,10 @@ static void __init_timer(struct timer_list *timer, | |||
628 | lockdep_init_map(&timer->lockdep_map, name, key, 0); | 628 | lockdep_init_map(&timer->lockdep_map, name, key, 0); |
629 | } | 629 | } |
630 | 630 | ||
631 | void setup_deferrable_timer_on_stack_key(struct timer_list *timer, | ||
632 | const char *name, | ||
633 | struct lock_class_key *key, | ||
634 | void (*function)(unsigned long), | ||
635 | unsigned long data) | ||
636 | { | ||
637 | timer->function = function; | ||
638 | timer->data = data; | ||
639 | init_timer_on_stack_key(timer, name, key); | ||
640 | timer_set_deferrable(timer); | ||
641 | } | ||
642 | EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); | ||
643 | |||
644 | /** | 631 | /** |
645 | * init_timer_key - initialize a timer | 632 | * init_timer_key - initialize a timer |
646 | * @timer: the timer to be initialized | 633 | * @timer: the timer to be initialized |
634 | * @flags: timer flags | ||
647 | * @name: name of the timer | 635 | * @name: name of the timer |
648 | * @key: lockdep class key of the fake lock used for tracking timer | 636 | * @key: lockdep class key of the fake lock used for tracking timer |
649 | * sync lock dependencies | 637 | * sync lock dependencies |
@@ -651,24 +639,14 @@ EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); | |||
651 | * init_timer_key() must be done to a timer prior calling *any* of the | 639 | * init_timer_key() must be done to a timer prior calling *any* of the |
652 | * other timer functions. | 640 | * other timer functions. |
653 | */ | 641 | */ |
654 | void init_timer_key(struct timer_list *timer, | 642 | void init_timer_key(struct timer_list *timer, unsigned int flags, |
655 | const char *name, | 643 | const char *name, struct lock_class_key *key) |
656 | struct lock_class_key *key) | ||
657 | { | 644 | { |
658 | debug_init(timer); | 645 | debug_init(timer); |
659 | __init_timer(timer, name, key); | 646 | do_init_timer(timer, flags, name, key); |
660 | } | 647 | } |
661 | EXPORT_SYMBOL(init_timer_key); | 648 | EXPORT_SYMBOL(init_timer_key); |
662 | 649 | ||
663 | void init_timer_deferrable_key(struct timer_list *timer, | ||
664 | const char *name, | ||
665 | struct lock_class_key *key) | ||
666 | { | ||
667 | init_timer_key(timer, name, key); | ||
668 | timer_set_deferrable(timer); | ||
669 | } | ||
670 | EXPORT_SYMBOL(init_timer_deferrable_key); | ||
671 | |||
672 | static inline void detach_timer(struct timer_list *timer, bool clear_pending) | 650 | static inline void detach_timer(struct timer_list *timer, bool clear_pending) |
673 | { | 651 | { |
674 | struct list_head *entry = &timer->entry; | 652 | struct list_head *entry = &timer->entry; |
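
With the flags now passed straight into do_init_timer(), the dedicated deferrable setup helpers removed above are no longer needed; a caller simply hands TIMER_DEFERRABLE (or TIMER_IRQSAFE) to init_timer_key(). A hedged sketch of what that looks like without the convenience macros that normally supply the lockdep name and key; all "example" identifiers are illustrative:

	static struct timer_list example_timer;
	static struct lock_class_key example_timer_key;	/* usually macro-provided */

	static void example_timer_fn(unsigned long data)
	{
		/* ... timer work ... */
	}

	static void example_timer_start(void)
	{
		init_timer_key(&example_timer, TIMER_DEFERRABLE,
			       "example_timer", &example_timer_key);
		example_timer.function = example_timer_fn;
		example_timer.data = 0;
		mod_timer(&example_timer, jiffies + HZ);	/* fire in about 1s */
	}

An irqsafe timer is set up the same way with TIMER_IRQSAFE; per the __run_timers() and del_timer_sync() changes below, such a timer runs its callback with interrupts disabled and may be synchronously deleted from hardirq context.
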
@@ -686,7 +664,7 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base) | |||
686 | { | 664 | { |
687 | detach_timer(timer, true); | 665 | detach_timer(timer, true); |
688 | if (!tbase_get_deferrable(timer->base)) | 666 | if (!tbase_get_deferrable(timer->base)) |
689 | timer->base->active_timers--; | 667 | base->active_timers--; |
690 | } | 668 | } |
691 | 669 | ||
692 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | 670 | static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, |
@@ -697,7 +675,7 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, | |||
697 | 675 | ||
698 | detach_timer(timer, clear_pending); | 676 | detach_timer(timer, clear_pending); |
699 | if (!tbase_get_deferrable(timer->base)) { | 677 | if (!tbase_get_deferrable(timer->base)) { |
700 | timer->base->active_timers--; | 678 | base->active_timers--; |
701 | if (timer->expires == base->next_timer) | 679 | if (timer->expires == base->next_timer) |
702 | base->next_timer = base->timer_jiffies; | 680 | base->next_timer = base->timer_jiffies; |
703 | } | 681 | } |
@@ -1029,14 +1007,14 @@ EXPORT_SYMBOL(try_to_del_timer_sync); | |||
1029 | * | 1007 | * |
1030 | * Synchronization rules: Callers must prevent restarting of the timer, | 1008 | * Synchronization rules: Callers must prevent restarting of the timer, |
1031 | * otherwise this function is meaningless. It must not be called from | 1009 | * otherwise this function is meaningless. It must not be called from |
1032 | * interrupt contexts. The caller must not hold locks which would prevent | 1010 | * interrupt contexts unless the timer is an irqsafe one. The caller must |
1033 | * completion of the timer's handler. The timer's handler must not call | 1011 | * not hold locks which would prevent completion of the timer's |
1034 | * add_timer_on(). Upon exit the timer is not queued and the handler is | 1012 | * handler. The timer's handler must not call add_timer_on(). Upon exit the |
1035 | * not running on any CPU. | 1013 | * timer is not queued and the handler is not running on any CPU. |
1036 | * | 1014 | * |
1037 | * Note: You must not hold locks that are held in interrupt context | 1015 | * Note: For !irqsafe timers, you must not hold locks that are held in |
1038 | * while calling this function. Even if the lock has nothing to do | 1016 | * interrupt context while calling this function. Even if the lock has |
1039 | * with the timer in question. Here's why: | 1017 | * nothing to do with the timer in question. Here's why: |
1040 | * | 1018 | * |
1041 | * CPU0 CPU1 | 1019 | * CPU0 CPU1 |
1042 | * ---- ---- | 1020 | * ---- ---- |
@@ -1073,7 +1051,7 @@ int del_timer_sync(struct timer_list *timer) | |||
1073 | * don't use it in hardirq context, because it | 1051 | * don't use it in hardirq context, because it |
1074 | * could lead to deadlock. | 1052 | * could lead to deadlock. |
1075 | */ | 1053 | */ |
1076 | WARN_ON(in_irq()); | 1054 | WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base)); |
1077 | for (;;) { | 1055 | for (;;) { |
1078 | int ret = try_to_del_timer_sync(timer); | 1056 | int ret = try_to_del_timer_sync(timer); |
1079 | if (ret >= 0) | 1057 | if (ret >= 0) |
@@ -1180,19 +1158,27 @@ static inline void __run_timers(struct tvec_base *base) | |||
1180 | while (!list_empty(head)) { | 1158 | while (!list_empty(head)) { |
1181 | void (*fn)(unsigned long); | 1159 | void (*fn)(unsigned long); |
1182 | unsigned long data; | 1160 | unsigned long data; |
1161 | bool irqsafe; | ||
1183 | 1162 | ||
1184 | timer = list_first_entry(head, struct timer_list,entry); | 1163 | timer = list_first_entry(head, struct timer_list,entry); |
1185 | fn = timer->function; | 1164 | fn = timer->function; |
1186 | data = timer->data; | 1165 | data = timer->data; |
1166 | irqsafe = tbase_get_irqsafe(timer->base); | ||
1187 | 1167 | ||
1188 | timer_stats_account_timer(timer); | 1168 | timer_stats_account_timer(timer); |
1189 | 1169 | ||
1190 | base->running_timer = timer; | 1170 | base->running_timer = timer; |
1191 | detach_expired_timer(timer, base); | 1171 | detach_expired_timer(timer, base); |
1192 | 1172 | ||
1193 | spin_unlock_irq(&base->lock); | 1173 | if (irqsafe) { |
1194 | call_timer_fn(timer, fn, data); | 1174 | spin_unlock(&base->lock); |
1195 | spin_lock_irq(&base->lock); | 1175 | call_timer_fn(timer, fn, data); |
1176 | spin_lock(&base->lock); | ||
1177 | } else { | ||
1178 | spin_unlock_irq(&base->lock); | ||
1179 | call_timer_fn(timer, fn, data); | ||
1180 | spin_lock_irq(&base->lock); | ||
1181 | } | ||
1196 | } | 1182 | } |
1197 | } | 1183 | } |
1198 | base->running_timer = NULL; | 1184 | base->running_timer = NULL; |
@@ -1791,9 +1777,13 @@ static struct notifier_block __cpuinitdata timers_nb = { | |||
1791 | 1777 | ||
1792 | void __init init_timers(void) | 1778 | void __init init_timers(void) |
1793 | { | 1779 | { |
1794 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 1780 | int err; |
1795 | (void *)(long)smp_processor_id()); | 1781 | |
1782 | /* ensure there are enough low bits for flags in timer->base pointer */ | ||
1783 | BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK); | ||
1796 | 1784 | ||
1785 | err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | ||
1786 | (void *)(long)smp_processor_id()); | ||
1797 | init_timer_stats(); | 1787 | init_timer_stats(); |
1798 | 1788 | ||
1799 | BUG_ON(err != NOTIFY_OK); | 1789 | BUG_ON(err != NOTIFY_OK); |
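To make the locking warning in the del_timer_sync() comment above concrete, here is a minimal sketch of a pattern that deadlocks with a normal (!irqsafe) timer: one CPU holds a lock and spins in del_timer_sync() waiting for the handler to finish, while the handler on another CPU spins waiting for that same lock. The names my_lock, my_timer and my_timer_fn are hypothetical.

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(my_lock);
static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	spin_lock(&my_lock);		/* handler needs my_lock */
	/* ... work ... */
	spin_unlock(&my_lock);
}

static void broken_teardown(void)
{
	spin_lock_irq(&my_lock);	/* my_lock is held ... */
	del_timer_sync(&my_timer);	/* ... while we wait for my_timer_fn(), */
	spin_unlock_irq(&my_lock);	/* which itself waits for my_lock: deadlock */
}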
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8c4c07071cc5..4cea4f41c1d9 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS | |||
49 | help | 49 | help |
50 | See Documentation/trace/ftrace-design.txt | 50 | See Documentation/trace/ftrace-design.txt |
51 | 51 | ||
52 | config HAVE_FENTRY | ||
53 | bool | ||
54 | help | ||
55 | Arch supports the gcc options -pg with -mfentry | ||
56 | |||
52 | config HAVE_C_RECORDMCOUNT | 57 | config HAVE_C_RECORDMCOUNT |
53 | bool | 58 | bool |
54 | help | 59 | help |
@@ -57,8 +62,12 @@ config HAVE_C_RECORDMCOUNT | |||
57 | config TRACER_MAX_TRACE | 62 | config TRACER_MAX_TRACE |
58 | bool | 63 | bool |
59 | 64 | ||
65 | config TRACE_CLOCK | ||
66 | bool | ||
67 | |||
60 | config RING_BUFFER | 68 | config RING_BUFFER |
61 | bool | 69 | bool |
70 | select TRACE_CLOCK | ||
62 | 71 | ||
63 | config FTRACE_NMI_ENTER | 72 | config FTRACE_NMI_ENTER |
64 | bool | 73 | bool |
@@ -109,6 +118,7 @@ config TRACING | |||
109 | select NOP_TRACER | 118 | select NOP_TRACER |
110 | select BINARY_PRINTF | 119 | select BINARY_PRINTF |
111 | select EVENT_TRACING | 120 | select EVENT_TRACING |
121 | select TRACE_CLOCK | ||
112 | 122 | ||
113 | config GENERIC_TRACER | 123 | config GENERIC_TRACER |
114 | bool | 124 | bool |
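The new HAVE_FENTRY symbol above advertises that an architecture can combine gcc's -pg with -mfentry, which emits the profiling call before the function prologue instead of the traditional mcount call after it. A loose sketch of how code can key off that build choice; CC_USING_FENTRY is the define referenced later in this series (see the trace_functions_graph.c hunk below), while the symbol names here are illustrative and vary by architecture.

#ifdef CC_USING_FENTRY
# define TRACE_CALL_SYM	"__fentry__"	/* call emitted before the prologue */
#else
# define TRACE_CALL_SYM	"mcount"	/* traditional -pg call site */
#endif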
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index b831087c8200..d7e2068e4b71 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER | |||
5 | ORIG_CFLAGS := $(KBUILD_CFLAGS) | 5 | ORIG_CFLAGS := $(KBUILD_CFLAGS) |
6 | KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) | 6 | KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) |
7 | 7 | ||
8 | ifdef CONFIG_FTRACE_SELFTEST | ||
8 | # selftest needs instrumentation | 9 | # selftest needs instrumentation |
9 | CFLAGS_trace_selftest_dynamic.o = -pg | 10 | CFLAGS_trace_selftest_dynamic.o = -pg |
10 | obj-y += trace_selftest_dynamic.o | 11 | obj-y += trace_selftest_dynamic.o |
11 | endif | 12 | endif |
13 | endif | ||
12 | 14 | ||
13 | # If unlikely tracing is enabled, do not trace these files | 15 | # If unlikely tracing is enabled, do not trace these files |
14 | ifdef CONFIG_TRACING_BRANCHES | 16 | ifdef CONFIG_TRACING_BRANCHES |
@@ -17,11 +19,7 @@ endif | |||
17 | 19 | ||
18 | CFLAGS_trace_events_filter.o := -I$(src) | 20 | CFLAGS_trace_events_filter.o := -I$(src) |
19 | 21 | ||
20 | # | 22 | obj-$(CONFIG_TRACE_CLOCK) += trace_clock.o |
21 | # Make the trace clocks available generally: it's infrastructure | ||
22 | # relied on by ptrace for example: | ||
23 | # | ||
24 | obj-y += trace_clock.o | ||
25 | 23 | ||
26 | obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o | 24 | obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o |
27 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o | 25 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b4f20fba09fc..9dcf15d38380 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -64,12 +64,20 @@ | |||
64 | 64 | ||
65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) | 65 | #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) |
66 | 66 | ||
67 | static struct ftrace_ops ftrace_list_end __read_mostly = { | ||
68 | .func = ftrace_stub, | ||
69 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
70 | }; | ||
71 | |||
67 | /* ftrace_enabled is a method to turn ftrace on or off */ | 72 | /* ftrace_enabled is a method to turn ftrace on or off */ |
68 | int ftrace_enabled __read_mostly; | 73 | int ftrace_enabled __read_mostly; |
69 | static int last_ftrace_enabled; | 74 | static int last_ftrace_enabled; |
70 | 75 | ||
71 | /* Quick disabling of function tracer. */ | 76 | /* Quick disabling of function tracer. */ |
72 | int function_trace_stop; | 77 | int function_trace_stop __read_mostly; |
78 | |||
79 | /* Current function tracing op */ | ||
80 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; | ||
73 | 81 | ||
74 | /* List for set_ftrace_pid's pids. */ | 82 | /* List for set_ftrace_pid's pids. */ |
75 | LIST_HEAD(ftrace_pids); | 83 | LIST_HEAD(ftrace_pids); |
@@ -86,22 +94,43 @@ static int ftrace_disabled __read_mostly; | |||
86 | 94 | ||
87 | static DEFINE_MUTEX(ftrace_lock); | 95 | static DEFINE_MUTEX(ftrace_lock); |
88 | 96 | ||
89 | static struct ftrace_ops ftrace_list_end __read_mostly = { | ||
90 | .func = ftrace_stub, | ||
91 | }; | ||
92 | |||
93 | static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; | 97 | static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; |
94 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; | 98 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; |
95 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | 99 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; |
96 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 100 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
97 | static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub; | ||
98 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | ||
99 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | 101 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; |
100 | static struct ftrace_ops global_ops; | 102 | static struct ftrace_ops global_ops; |
101 | static struct ftrace_ops control_ops; | 103 | static struct ftrace_ops control_ops; |
102 | 104 | ||
103 | static void | 105 | #if ARCH_SUPPORTS_FTRACE_OPS |
104 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); | 106 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
107 | struct ftrace_ops *op, struct pt_regs *regs); | ||
108 | #else | ||
109 | /* See comment below, where ftrace_ops_list_func is defined */ | ||
110 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); | ||
111 | #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) | ||
112 | #endif | ||
113 | |||
114 | /** | ||
115 | * ftrace_nr_registered_ops - return number of ops registered | ||
116 | * | ||
117 | * Returns the number of ftrace_ops registered and tracing functions | ||
118 | */ | ||
119 | int ftrace_nr_registered_ops(void) | ||
120 | { | ||
121 | struct ftrace_ops *ops; | ||
122 | int cnt = 0; | ||
123 | |||
124 | mutex_lock(&ftrace_lock); | ||
125 | |||
126 | for (ops = ftrace_ops_list; | ||
127 | ops != &ftrace_list_end; ops = ops->next) | ||
128 | cnt++; | ||
129 | |||
130 | mutex_unlock(&ftrace_lock); | ||
131 | |||
132 | return cnt; | ||
133 | } | ||
105 | 134 | ||
106 | /* | 135 | /* |
107 | * Traverse the ftrace_global_list, invoking all entries. The reason that we | 136 | * Traverse the ftrace_global_list, invoking all entries. The reason that we |
@@ -112,29 +141,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); | |||
112 | * | 141 | * |
113 | * Silly Alpha and silly pointer-speculation compiler optimizations! | 142 | * Silly Alpha and silly pointer-speculation compiler optimizations! |
114 | */ | 143 | */ |
115 | static void ftrace_global_list_func(unsigned long ip, | 144 | static void |
116 | unsigned long parent_ip) | 145 | ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, |
146 | struct ftrace_ops *op, struct pt_regs *regs) | ||
117 | { | 147 | { |
118 | struct ftrace_ops *op; | ||
119 | |||
120 | if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) | 148 | if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) |
121 | return; | 149 | return; |
122 | 150 | ||
123 | trace_recursion_set(TRACE_GLOBAL_BIT); | 151 | trace_recursion_set(TRACE_GLOBAL_BIT); |
124 | op = rcu_dereference_raw(ftrace_global_list); /*see above*/ | 152 | op = rcu_dereference_raw(ftrace_global_list); /*see above*/ |
125 | while (op != &ftrace_list_end) { | 153 | while (op != &ftrace_list_end) { |
126 | op->func(ip, parent_ip); | 154 | op->func(ip, parent_ip, op, regs); |
127 | op = rcu_dereference_raw(op->next); /*see above*/ | 155 | op = rcu_dereference_raw(op->next); /*see above*/ |
128 | }; | 156 | }; |
129 | trace_recursion_clear(TRACE_GLOBAL_BIT); | 157 | trace_recursion_clear(TRACE_GLOBAL_BIT); |
130 | } | 158 | } |
131 | 159 | ||
132 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) | 160 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, |
161 | struct ftrace_ops *op, struct pt_regs *regs) | ||
133 | { | 162 | { |
134 | if (!test_tsk_trace_trace(current)) | 163 | if (!test_tsk_trace_trace(current)) |
135 | return; | 164 | return; |
136 | 165 | ||
137 | ftrace_pid_function(ip, parent_ip); | 166 | ftrace_pid_function(ip, parent_ip, op, regs); |
138 | } | 167 | } |
139 | 168 | ||
140 | static void set_ftrace_pid_function(ftrace_func_t func) | 169 | static void set_ftrace_pid_function(ftrace_func_t func) |
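The hunks above widen ftrace_func_t from two parameters to four: the traced ip, its parent ip, the ftrace_ops that matched, and a pt_regs pointer. A hedged sketch of a minimal callback written against the new signature; my_ops, my_trace_func and my_hits are hypothetical names.

#include <linux/ftrace.h>

static unsigned long my_hits;

static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs may be NULL unless this ops asked for (and got) saved regs */
	my_hits++;
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	/* the callback handles its own recursion, so skip the list-func guard */
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_ops) would start calling my_trace_func() */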
@@ -153,25 +182,9 @@ static void set_ftrace_pid_function(ftrace_func_t func) | |||
153 | void clear_ftrace_function(void) | 182 | void clear_ftrace_function(void) |
154 | { | 183 | { |
155 | ftrace_trace_function = ftrace_stub; | 184 | ftrace_trace_function = ftrace_stub; |
156 | __ftrace_trace_function = ftrace_stub; | ||
157 | __ftrace_trace_function_delay = ftrace_stub; | ||
158 | ftrace_pid_function = ftrace_stub; | 185 | ftrace_pid_function = ftrace_stub; |
159 | } | 186 | } |
160 | 187 | ||
161 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
162 | /* | ||
163 | * For those archs that do not test ftrace_trace_stop in their | ||
164 | * mcount call site, we need to do it from C. | ||
165 | */ | ||
166 | static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | ||
167 | { | ||
168 | if (function_trace_stop) | ||
169 | return; | ||
170 | |||
171 | __ftrace_trace_function(ip, parent_ip); | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | static void control_ops_disable_all(struct ftrace_ops *ops) | 188 | static void control_ops_disable_all(struct ftrace_ops *ops) |
176 | { | 189 | { |
177 | int cpu; | 190 | int cpu; |
@@ -230,28 +243,27 @@ static void update_ftrace_function(void) | |||
230 | 243 | ||
231 | /* | 244 | /* |
232 | * If we are at the end of the list and this ops is | 245 | * If we are at the end of the list and this ops is |
233 | * not dynamic, then have the mcount trampoline call | 246 | * recursion safe and not dynamic and the arch supports passing ops, |
234 | * the function directly | 247 | * then have the mcount trampoline call the function directly. |
235 | */ | 248 | */ |
236 | if (ftrace_ops_list == &ftrace_list_end || | 249 | if (ftrace_ops_list == &ftrace_list_end || |
237 | (ftrace_ops_list->next == &ftrace_list_end && | 250 | (ftrace_ops_list->next == &ftrace_list_end && |
238 | !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) | 251 | !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) && |
252 | (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) && | ||
253 | !FTRACE_FORCE_LIST_FUNC)) { | ||
254 | /* Set the ftrace_ops that the arch callback uses */ | ||
255 | if (ftrace_ops_list == &global_ops) | ||
256 | function_trace_op = ftrace_global_list; | ||
257 | else | ||
258 | function_trace_op = ftrace_ops_list; | ||
239 | func = ftrace_ops_list->func; | 259 | func = ftrace_ops_list->func; |
240 | else | 260 | } else { |
261 | /* Just use the default ftrace_ops */ | ||
262 | function_trace_op = &ftrace_list_end; | ||
241 | func = ftrace_ops_list_func; | 263 | func = ftrace_ops_list_func; |
264 | } | ||
242 | 265 | ||
243 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
244 | ftrace_trace_function = func; | 266 | ftrace_trace_function = func; |
245 | #else | ||
246 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
247 | /* do not update till all functions have been modified */ | ||
248 | __ftrace_trace_function_delay = func; | ||
249 | #else | ||
250 | __ftrace_trace_function = func; | ||
251 | #endif | ||
252 | ftrace_trace_function = | ||
253 | (func == ftrace_stub) ? func : ftrace_test_stop_func; | ||
254 | #endif | ||
255 | } | 267 | } |
256 | 268 | ||
257 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) | 269 | static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) |
@@ -325,6 +337,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
325 | if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) | 337 | if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) |
326 | return -EINVAL; | 338 | return -EINVAL; |
327 | 339 | ||
340 | #ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS | ||
341 | /* | ||
342 | * If the ftrace_ops specifies SAVE_REGS, then it only can be used | ||
343 | * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. | ||
344 | * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant. | ||
345 | */ | ||
346 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && | ||
347 | !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) | ||
348 | return -EINVAL; | ||
349 | |||
350 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) | ||
351 | ops->flags |= FTRACE_OPS_FL_SAVE_REGS; | ||
352 | #endif | ||
353 | |||
328 | if (!core_kernel_data((unsigned long)ops)) | 354 | if (!core_kernel_data((unsigned long)ops)) |
329 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; | 355 | ops->flags |= FTRACE_OPS_FL_DYNAMIC; |
330 | 356 | ||
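The registration check added above ties the new flags together: on an arch without ARCH_SUPPORTS_FTRACE_SAVE_REGS, FTRACE_OPS_FL_SAVE_REGS alone is rejected, while FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED lets the ops register anyway and simply receive regs == NULL. A rough sketch of an ops that degrades gracefully; my_regs_ops, my_regs_func and inspect_regs() are hypothetical.

static void my_regs_func(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs is NULL when the arch could not save register state */
	if (regs)
		inspect_regs(regs);	/* hypothetical helper */
}

static struct ftrace_ops my_regs_ops = {
	.func	= my_regs_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE |
		  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};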
@@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) | |||
773 | } | 799 | } |
774 | 800 | ||
775 | static void | 801 | static void |
776 | function_profile_call(unsigned long ip, unsigned long parent_ip) | 802 | function_profile_call(unsigned long ip, unsigned long parent_ip, |
803 | struct ftrace_ops *ops, struct pt_regs *regs) | ||
777 | { | 804 | { |
778 | struct ftrace_profile_stat *stat; | 805 | struct ftrace_profile_stat *stat; |
779 | struct ftrace_profile *rec; | 806 | struct ftrace_profile *rec; |
@@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip) | |||
803 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 830 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
804 | static int profile_graph_entry(struct ftrace_graph_ent *trace) | 831 | static int profile_graph_entry(struct ftrace_graph_ent *trace) |
805 | { | 832 | { |
806 | function_profile_call(trace->func, 0); | 833 | function_profile_call(trace->func, 0, NULL, NULL); |
807 | return 1; | 834 | return 1; |
808 | } | 835 | } |
809 | 836 | ||
@@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void) | |||
863 | #else | 890 | #else |
864 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { | 891 | static struct ftrace_ops ftrace_profile_ops __read_mostly = { |
865 | .func = function_profile_call, | 892 | .func = function_profile_call, |
893 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
866 | }; | 894 | }; |
867 | 895 | ||
868 | static int register_ftrace_profiler(void) | 896 | static int register_ftrace_profiler(void) |
@@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = { | |||
1045 | .func = ftrace_stub, | 1073 | .func = ftrace_stub, |
1046 | .notrace_hash = EMPTY_HASH, | 1074 | .notrace_hash = EMPTY_HASH, |
1047 | .filter_hash = EMPTY_HASH, | 1075 | .filter_hash = EMPTY_HASH, |
1076 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
1048 | }; | 1077 | }; |
1049 | 1078 | ||
1050 | static DEFINE_MUTEX(ftrace_regex_lock); | 1079 | static DEFINE_MUTEX(ftrace_regex_lock); |
@@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, | |||
1525 | rec->flags++; | 1554 | rec->flags++; |
1526 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) | 1555 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) |
1527 | return; | 1556 | return; |
1557 | /* | ||
1558 | * If any ops wants regs saved for this function | ||
1559 | * then all ops will get saved regs. | ||
1560 | */ | ||
1561 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | ||
1562 | rec->flags |= FTRACE_FL_REGS; | ||
1528 | } else { | 1563 | } else { |
1529 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) | 1564 | if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) |
1530 | return; | 1565 | return; |
@@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
1616 | if (enable && (rec->flags & ~FTRACE_FL_MASK)) | 1651 | if (enable && (rec->flags & ~FTRACE_FL_MASK)) |
1617 | flag = FTRACE_FL_ENABLED; | 1652 | flag = FTRACE_FL_ENABLED; |
1618 | 1653 | ||
1654 | /* | ||
1655 | * If enabling and the REGS flag does not match the REGS_EN, then | ||
1656 | * do not ignore this record. Set flags to fail the compare against | ||
1657 | * ENABLED. | ||
1658 | */ | ||
1659 | if (flag && | ||
1660 | (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN))) | ||
1661 | flag |= FTRACE_FL_REGS; | ||
1662 | |||
1619 | /* If the state of this record hasn't changed, then do nothing */ | 1663 | /* If the state of this record hasn't changed, then do nothing */ |
1620 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | 1664 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
1621 | return FTRACE_UPDATE_IGNORE; | 1665 | return FTRACE_UPDATE_IGNORE; |
1622 | 1666 | ||
1623 | if (flag) { | 1667 | if (flag) { |
1624 | if (update) | 1668 | /* Save off if rec is being enabled (for return value) */ |
1669 | flag ^= rec->flags & FTRACE_FL_ENABLED; | ||
1670 | |||
1671 | if (update) { | ||
1625 | rec->flags |= FTRACE_FL_ENABLED; | 1672 | rec->flags |= FTRACE_FL_ENABLED; |
1626 | return FTRACE_UPDATE_MAKE_CALL; | 1673 | if (flag & FTRACE_FL_REGS) { |
1674 | if (rec->flags & FTRACE_FL_REGS) | ||
1675 | rec->flags |= FTRACE_FL_REGS_EN; | ||
1676 | else | ||
1677 | rec->flags &= ~FTRACE_FL_REGS_EN; | ||
1678 | } | ||
1679 | } | ||
1680 | |||
1681 | /* | ||
1682 | * If this record is being updated from a nop, then | ||
1683 | * return UPDATE_MAKE_CALL. | ||
1684 | * Otherwise, if the EN flag is set, then return | ||
1685 | * UPDATE_MODIFY_CALL_REGS to tell the caller to convert | ||
1686 | * from the non-save regs, to a save regs function. | ||
1687 | * Otherwise, | ||
1688 | * return UPDATE_MODIFY_CALL to tell the caller to convert | ||
1689 | * from the save regs, to a non-save regs function. | ||
1690 | */ | ||
1691 | if (flag & FTRACE_FL_ENABLED) | ||
1692 | return FTRACE_UPDATE_MAKE_CALL; | ||
1693 | else if (rec->flags & FTRACE_FL_REGS_EN) | ||
1694 | return FTRACE_UPDATE_MODIFY_CALL_REGS; | ||
1695 | else | ||
1696 | return FTRACE_UPDATE_MODIFY_CALL; | ||
1627 | } | 1697 | } |
1628 | 1698 | ||
1629 | if (update) | 1699 | if (update) { |
1630 | rec->flags &= ~FTRACE_FL_ENABLED; | 1700 | /* If there's no more users, clear all flags */ |
1701 | if (!(rec->flags & ~FTRACE_FL_MASK)) | ||
1702 | rec->flags = 0; | ||
1703 | else | ||
1704 | /* Just disable the record (keep REGS state) */ | ||
1705 | rec->flags &= ~FTRACE_FL_ENABLED; | ||
1706 | } | ||
1631 | 1707 | ||
1632 | return FTRACE_UPDATE_MAKE_NOP; | 1708 | return FTRACE_UPDATE_MAKE_NOP; |
1633 | } | 1709 | } |
@@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable) | |||
1662 | static int | 1738 | static int |
1663 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | 1739 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
1664 | { | 1740 | { |
1741 | unsigned long ftrace_old_addr; | ||
1665 | unsigned long ftrace_addr; | 1742 | unsigned long ftrace_addr; |
1666 | int ret; | 1743 | int ret; |
1667 | 1744 | ||
1668 | ftrace_addr = (unsigned long)FTRACE_ADDR; | ||
1669 | |||
1670 | ret = ftrace_update_record(rec, enable); | 1745 | ret = ftrace_update_record(rec, enable); |
1671 | 1746 | ||
1747 | if (rec->flags & FTRACE_FL_REGS) | ||
1748 | ftrace_addr = (unsigned long)FTRACE_REGS_ADDR; | ||
1749 | else | ||
1750 | ftrace_addr = (unsigned long)FTRACE_ADDR; | ||
1751 | |||
1672 | switch (ret) { | 1752 | switch (ret) { |
1673 | case FTRACE_UPDATE_IGNORE: | 1753 | case FTRACE_UPDATE_IGNORE: |
1674 | return 0; | 1754 | return 0; |
@@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
1678 | 1758 | ||
1679 | case FTRACE_UPDATE_MAKE_NOP: | 1759 | case FTRACE_UPDATE_MAKE_NOP: |
1680 | return ftrace_make_nop(NULL, rec, ftrace_addr); | 1760 | return ftrace_make_nop(NULL, rec, ftrace_addr); |
1761 | |||
1762 | case FTRACE_UPDATE_MODIFY_CALL_REGS: | ||
1763 | case FTRACE_UPDATE_MODIFY_CALL: | ||
1764 | if (rec->flags & FTRACE_FL_REGS) | ||
1765 | ftrace_old_addr = (unsigned long)FTRACE_ADDR; | ||
1766 | else | ||
1767 | ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR; | ||
1768 | |||
1769 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); | ||
1681 | } | 1770 | } |
1682 | 1771 | ||
1683 | return -1; /* unknow ftrace bug */ | 1772 | return -1; /* unknow ftrace bug */ |
@@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command) | |||
1882 | */ | 1971 | */ |
1883 | arch_ftrace_update_code(command); | 1972 | arch_ftrace_update_code(command); |
1884 | 1973 | ||
1885 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
1886 | /* | ||
1887 | * For archs that call ftrace_test_stop_func(), we must | ||
1888 | * wait till after we update all the function callers | ||
1889 | * before we update the callback. This keeps different | ||
1890 | * ops that record different functions from corrupting | ||
1891 | * each other. | ||
1892 | */ | ||
1893 | __ftrace_trace_function = __ftrace_trace_function_delay; | ||
1894 | #endif | ||
1895 | function_trace_stop--; | 1974 | function_trace_stop--; |
1896 | 1975 | ||
1897 | ret = ftrace_arch_code_modify_post_process(); | 1976 | ret = ftrace_arch_code_modify_post_process(); |
@@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v) | |||
2441 | 2520 | ||
2442 | seq_printf(m, "%ps", (void *)rec->ip); | 2521 | seq_printf(m, "%ps", (void *)rec->ip); |
2443 | if (iter->flags & FTRACE_ITER_ENABLED) | 2522 | if (iter->flags & FTRACE_ITER_ENABLED) |
2444 | seq_printf(m, " (%ld)", | 2523 | seq_printf(m, " (%ld)%s", |
2445 | rec->flags & ~FTRACE_FL_MASK); | 2524 | rec->flags & ~FTRACE_FL_MASK, |
2525 | rec->flags & FTRACE_FL_REGS ? " R" : ""); | ||
2446 | seq_printf(m, "\n"); | 2526 | seq_printf(m, "\n"); |
2447 | 2527 | ||
2448 | return 0; | 2528 | return 0; |
@@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void) | |||
2790 | } | 2870 | } |
2791 | device_initcall(ftrace_mod_cmd_init); | 2871 | device_initcall(ftrace_mod_cmd_init); |
2792 | 2872 | ||
2793 | static void | 2873 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, |
2794 | function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | 2874 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
2795 | { | 2875 | { |
2796 | struct ftrace_func_probe *entry; | 2876 | struct ftrace_func_probe *entry; |
2797 | struct hlist_head *hhd; | 2877 | struct hlist_head *hhd; |
@@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, | |||
3162 | } | 3242 | } |
3163 | 3243 | ||
3164 | static int | 3244 | static int |
3165 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | 3245 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) |
3166 | int reset, int enable) | 3246 | { |
3247 | struct ftrace_func_entry *entry; | ||
3248 | |||
3249 | if (!ftrace_location(ip)) | ||
3250 | return -EINVAL; | ||
3251 | |||
3252 | if (remove) { | ||
3253 | entry = ftrace_lookup_ip(hash, ip); | ||
3254 | if (!entry) | ||
3255 | return -ENOENT; | ||
3256 | free_hash_entry(hash, entry); | ||
3257 | return 0; | ||
3258 | } | ||
3259 | |||
3260 | return add_hash_entry(hash, ip); | ||
3261 | } | ||
3262 | |||
3263 | static int | ||
3264 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | ||
3265 | unsigned long ip, int remove, int reset, int enable) | ||
3167 | { | 3266 | { |
3168 | struct ftrace_hash **orig_hash; | 3267 | struct ftrace_hash **orig_hash; |
3169 | struct ftrace_hash *hash; | 3268 | struct ftrace_hash *hash; |
@@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
3192 | ret = -EINVAL; | 3291 | ret = -EINVAL; |
3193 | goto out_regex_unlock; | 3292 | goto out_regex_unlock; |
3194 | } | 3293 | } |
3294 | if (ip) { | ||
3295 | ret = ftrace_match_addr(hash, ip, remove); | ||
3296 | if (ret < 0) | ||
3297 | goto out_regex_unlock; | ||
3298 | } | ||
3195 | 3299 | ||
3196 | mutex_lock(&ftrace_lock); | 3300 | mutex_lock(&ftrace_lock); |
3197 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | 3301 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
@@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
3208 | return ret; | 3312 | return ret; |
3209 | } | 3313 | } |
3210 | 3314 | ||
3315 | static int | ||
3316 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, | ||
3317 | int reset, int enable) | ||
3318 | { | ||
3319 | return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); | ||
3320 | } | ||
3321 | |||
3322 | /** | ||
3323 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address | ||
3324 | * @ops - the ops to set the filter with | ||
3325 | * @ip - the address to add to or remove from the filter. | ||
3326 | * @remove - non-zero to remove the ip from the filter | ||
3327 | * @reset - non-zero to reset all filters before applying this filter. | ||
3328 | * | ||
3329 | * Filters denote which functions should be enabled when tracing is enabled. | ||
3330 | * If @ip is NULL, it fails to update the filter. | ||
3331 | */ | ||
3332 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, | ||
3333 | int remove, int reset) | ||
3334 | { | ||
3335 | return ftrace_set_addr(ops, ip, remove, reset, 1); | ||
3336 | } | ||
3337 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); | ||
3338 | |||
3339 | static int | ||
3340 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | ||
3341 | int reset, int enable) | ||
3342 | { | ||
3343 | return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); | ||
3344 | } | ||
3345 | |||
3211 | /** | 3346 | /** |
3212 | * ftrace_set_filter - set a function to filter on in ftrace | 3347 | * ftrace_set_filter - set a function to filter on in ftrace |
3213 | * @ops - the ops to set the filter with | 3348 | * @ops - the ops to set the filter with |
@@ -3912,6 +4047,7 @@ void __init ftrace_init(void) | |||
3912 | 4047 | ||
3913 | static struct ftrace_ops global_ops = { | 4048 | static struct ftrace_ops global_ops = { |
3914 | .func = ftrace_stub, | 4049 | .func = ftrace_stub, |
4050 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
3915 | }; | 4051 | }; |
3916 | 4052 | ||
3917 | static int __init ftrace_nodyn_init(void) | 4053 | static int __init ftrace_nodyn_init(void) |
@@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | |||
3942 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 4078 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
3943 | 4079 | ||
3944 | static void | 4080 | static void |
3945 | ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) | 4081 | ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, |
4082 | struct ftrace_ops *op, struct pt_regs *regs) | ||
3946 | { | 4083 | { |
3947 | struct ftrace_ops *op; | ||
3948 | |||
3949 | if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) | 4084 | if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) |
3950 | return; | 4085 | return; |
3951 | 4086 | ||
@@ -3959,7 +4094,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) | |||
3959 | while (op != &ftrace_list_end) { | 4094 | while (op != &ftrace_list_end) { |
3960 | if (!ftrace_function_local_disabled(op) && | 4095 | if (!ftrace_function_local_disabled(op) && |
3961 | ftrace_ops_test(op, ip)) | 4096 | ftrace_ops_test(op, ip)) |
3962 | op->func(ip, parent_ip); | 4097 | op->func(ip, parent_ip, op, regs); |
3963 | 4098 | ||
3964 | op = rcu_dereference_raw(op->next); | 4099 | op = rcu_dereference_raw(op->next); |
3965 | }; | 4100 | }; |
@@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) | |||
3969 | 4104 | ||
3970 | static struct ftrace_ops control_ops = { | 4105 | static struct ftrace_ops control_ops = { |
3971 | .func = ftrace_ops_control_func, | 4106 | .func = ftrace_ops_control_func, |
4107 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
3972 | }; | 4108 | }; |
3973 | 4109 | ||
3974 | static void | 4110 | static inline void |
3975 | ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) | 4111 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
4112 | struct ftrace_ops *ignored, struct pt_regs *regs) | ||
3976 | { | 4113 | { |
3977 | struct ftrace_ops *op; | 4114 | struct ftrace_ops *op; |
3978 | 4115 | ||
4116 | if (function_trace_stop) | ||
4117 | return; | ||
4118 | |||
3979 | if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) | 4119 | if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) |
3980 | return; | 4120 | return; |
3981 | 4121 | ||
@@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) | |||
3988 | op = rcu_dereference_raw(ftrace_ops_list); | 4128 | op = rcu_dereference_raw(ftrace_ops_list); |
3989 | while (op != &ftrace_list_end) { | 4129 | while (op != &ftrace_list_end) { |
3990 | if (ftrace_ops_test(op, ip)) | 4130 | if (ftrace_ops_test(op, ip)) |
3991 | op->func(ip, parent_ip); | 4131 | op->func(ip, parent_ip, op, regs); |
3992 | op = rcu_dereference_raw(op->next); | 4132 | op = rcu_dereference_raw(op->next); |
3993 | }; | 4133 | }; |
3994 | preempt_enable_notrace(); | 4134 | preempt_enable_notrace(); |
3995 | trace_recursion_clear(TRACE_INTERNAL_BIT); | 4135 | trace_recursion_clear(TRACE_INTERNAL_BIT); |
3996 | } | 4136 | } |
3997 | 4137 | ||
4138 | /* | ||
4139 | * Some archs only support passing ip and parent_ip. Even though | ||
4140 | * the list function ignores the op parameter, we do not want any | ||
4141 | * C side effects, where a function is called without the caller | ||
4142 | * sending a third parameter. | ||
4143 | * Archs are to support both the regs and ftrace_ops at the same time. | ||
4144 | * If they support ftrace_ops, it is assumed they support regs. | ||
4145 | * If callbacks want to use regs, they must either check for regs | ||
4146 | * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS. | ||
4147 | * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full regs to be saved. | ||
4148 | * An architecture can pass partial regs with ftrace_ops and still | ||
4149 | * set ARCH_SUPPORTS_FTRACE_OPS. | ||
4150 | */ | ||
4151 | #if ARCH_SUPPORTS_FTRACE_OPS | ||
4152 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | ||
4153 | struct ftrace_ops *op, struct pt_regs *regs) | ||
4154 | { | ||
4155 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); | ||
4156 | } | ||
4157 | #else | ||
4158 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | ||
4159 | { | ||
4160 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); | ||
4161 | } | ||
4162 | #endif | ||
4163 | |||
3998 | static void clear_ftrace_swapper(void) | 4164 | static void clear_ftrace_swapper(void) |
3999 | { | 4165 | { |
4000 | struct task_struct *p; | 4166 | struct task_struct *p; |
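Among the additions to ftrace.c above is ftrace_set_filter_ip(), which installs or removes a filter entry by address instead of by name pattern. A hedged usage sketch; it assumes kallsyms_lookup_name() is available for the address lookup, and my_ops is a hypothetical, already-initialized struct ftrace_ops.

#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

static int trace_only_this_function(struct ftrace_ops *my_ops, const char *name)
{
	unsigned long ip = kallsyms_lookup_name(name);

	if (!ip)
		return -ENOENT;

	/* remove=0, reset=1: make this address the only filter entry */
	return ftrace_set_filter_ip(my_ops, ip, 0, 1);
}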
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 49491fa7daa2..b32ed0e385a5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2816,7 +2816,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | |||
2816 | * to the buffer after this will fail and return NULL. | 2816 | * to the buffer after this will fail and return NULL. |
2817 | * | 2817 | * |
2818 | * This is different than ring_buffer_record_disable() as | 2818 | * This is different than ring_buffer_record_disable() as |
2819 | * it works like an on/off switch, where as the disable() verison | 2819 | * it works like an on/off switch, where as the disable() version |
2820 | * must be paired with a enable(). | 2820 | * must be paired with a enable(). |
2821 | */ | 2821 | */ |
2822 | void ring_buffer_record_off(struct ring_buffer *buffer) | 2822 | void ring_buffer_record_off(struct ring_buffer *buffer) |
@@ -2839,7 +2839,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off); | |||
2839 | * ring_buffer_record_off(). | 2839 | * ring_buffer_record_off(). |
2840 | * | 2840 | * |
2841 | * This is different than ring_buffer_record_enable() as | 2841 | * This is different than ring_buffer_record_enable() as |
2842 | * it works like an on/off switch, where as the enable() verison | 2842 | * it works like an on/off switch, where as the enable() version |
2843 | * must be paired with a disable(). | 2843 | * must be paired with a disable(). |
2844 | */ | 2844 | */ |
2845 | void ring_buffer_record_on(struct ring_buffer *buffer) | 2845 | void ring_buffer_record_on(struct ring_buffer *buffer) |
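The two comment fixes above are a reminder of the distinction between the ring buffer's recording controls: ring_buffer_record_disable()/enable() nest and must be paired, while ring_buffer_record_off()/on() act as a plain switch. A small sketch, assuming buffer points at an already-allocated ring buffer.

#include <linux/ring_buffer.h>

static void record_switch_demo(struct ring_buffer *buffer)
{
	/* disable/enable nest and must always be paired */
	ring_buffer_record_disable(buffer);
	/* writes to the buffer fail here */
	ring_buffer_record_enable(buffer);

	/* off/on are a one-way switch: off stays off until record_on() */
	ring_buffer_record_off(buffer);
	ring_buffer_record_on(buffer);
}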
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 5c38c81496ce..1ec5c1dab629 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -328,7 +328,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
328 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 328 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
329 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | | 329 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
330 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | | 330 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | |
331 | TRACE_ITER_IRQ_INFO; | 331 | TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS; |
332 | 332 | ||
333 | static int trace_stop_count; | 333 | static int trace_stop_count; |
334 | static DEFINE_RAW_SPINLOCK(tracing_start_lock); | 334 | static DEFINE_RAW_SPINLOCK(tracing_start_lock); |
@@ -426,15 +426,15 @@ __setup("trace_buf_size=", set_buf_size); | |||
426 | 426 | ||
427 | static int __init set_tracing_thresh(char *str) | 427 | static int __init set_tracing_thresh(char *str) |
428 | { | 428 | { |
429 | unsigned long threshhold; | 429 | unsigned long threshold; |
430 | int ret; | 430 | int ret; |
431 | 431 | ||
432 | if (!str) | 432 | if (!str) |
433 | return 0; | 433 | return 0; |
434 | ret = strict_strtoul(str, 0, &threshhold); | 434 | ret = strict_strtoul(str, 0, &threshold); |
435 | if (ret < 0) | 435 | if (ret < 0) |
436 | return 0; | 436 | return 0; |
437 | tracing_thresh = threshhold * 1000; | 437 | tracing_thresh = threshold * 1000; |
438 | return 1; | 438 | return 1; |
439 | } | 439 | } |
440 | __setup("tracing_thresh=", set_tracing_thresh); | 440 | __setup("tracing_thresh=", set_tracing_thresh); |
@@ -470,6 +470,7 @@ static const char *trace_options[] = { | |||
470 | "overwrite", | 470 | "overwrite", |
471 | "disable_on_free", | 471 | "disable_on_free", |
472 | "irq-info", | 472 | "irq-info", |
473 | "markers", | ||
473 | NULL | 474 | NULL |
474 | }; | 475 | }; |
475 | 476 | ||
@@ -3886,6 +3887,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3886 | if (tracing_disabled) | 3887 | if (tracing_disabled) |
3887 | return -EINVAL; | 3888 | return -EINVAL; |
3888 | 3889 | ||
3890 | if (!(trace_flags & TRACE_ITER_MARKERS)) | ||
3891 | return -EINVAL; | ||
3892 | |||
3889 | if (cnt > TRACE_BUF_SIZE) | 3893 | if (cnt > TRACE_BUF_SIZE) |
3890 | cnt = TRACE_BUF_SIZE; | 3894 | cnt = TRACE_BUF_SIZE; |
3891 | 3895 | ||
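The trace.c hunks above add a "markers" option (TRACE_ITER_MARKERS, default on) and make tracing_mark_write() return -EINVAL when it is cleared. A small userspace sketch of the interface being gated, assuming debugfs is mounted at /sys/kernel/debug.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace\n";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0)
		perror("trace_marker write");	/* -EINVAL once markers is off */
	close(fd);
	return 0;
}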
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 55e1f7f0db12..63a2da0b9a6e 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]); | |||
472 | 472 | ||
473 | #ifdef CONFIG_DYNAMIC_FTRACE | 473 | #ifdef CONFIG_DYNAMIC_FTRACE |
474 | extern unsigned long ftrace_update_tot_cnt; | 474 | extern unsigned long ftrace_update_tot_cnt; |
475 | #endif | ||
475 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 476 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
476 | extern int DYN_FTRACE_TEST_NAME(void); | 477 | extern int DYN_FTRACE_TEST_NAME(void); |
477 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 | 478 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 |
478 | extern int DYN_FTRACE_TEST_NAME2(void); | 479 | extern int DYN_FTRACE_TEST_NAME2(void); |
479 | #endif | ||
480 | 480 | ||
481 | extern int ring_buffer_expanded; | 481 | extern int ring_buffer_expanded; |
482 | extern bool tracing_selftest_disabled; | 482 | extern bool tracing_selftest_disabled; |
@@ -680,6 +680,7 @@ enum trace_iterator_flags { | |||
680 | TRACE_ITER_OVERWRITE = 0x200000, | 680 | TRACE_ITER_OVERWRITE = 0x200000, |
681 | TRACE_ITER_STOP_ON_FREE = 0x400000, | 681 | TRACE_ITER_STOP_ON_FREE = 0x400000, |
682 | TRACE_ITER_IRQ_INFO = 0x800000, | 682 | TRACE_ITER_IRQ_INFO = 0x800000, |
683 | TRACE_ITER_MARKERS = 0x1000000, | ||
683 | }; | 684 | }; |
684 | 685 | ||
685 | /* | 686 | /* |
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 8a6d2ee2086c..84b1e045faba 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); | |||
258 | 258 | ||
259 | #ifdef CONFIG_FUNCTION_TRACER | 259 | #ifdef CONFIG_FUNCTION_TRACER |
260 | static void | 260 | static void |
261 | perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) | 261 | perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, |
262 | struct ftrace_ops *ops, struct pt_regs *pt_regs) | ||
262 | { | 263 | { |
263 | struct ftrace_entry *entry; | 264 | struct ftrace_entry *entry; |
264 | struct hlist_head *head; | 265 | struct hlist_head *head; |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 29111da1d100..d608d09d08c0 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -1199,6 +1199,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
1199 | return 0; | 1199 | return 0; |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | static void event_remove(struct ftrace_event_call *call) | ||
1203 | { | ||
1204 | ftrace_event_enable_disable(call, 0); | ||
1205 | if (call->event.funcs) | ||
1206 | __unregister_ftrace_event(&call->event); | ||
1207 | list_del(&call->list); | ||
1208 | } | ||
1209 | |||
1210 | static int event_init(struct ftrace_event_call *call) | ||
1211 | { | ||
1212 | int ret = 0; | ||
1213 | |||
1214 | if (WARN_ON(!call->name)) | ||
1215 | return -EINVAL; | ||
1216 | |||
1217 | if (call->class->raw_init) { | ||
1218 | ret = call->class->raw_init(call); | ||
1219 | if (ret < 0 && ret != -ENOSYS) | ||
1220 | pr_warn("Could not initialize trace events/%s\n", | ||
1221 | call->name); | ||
1222 | } | ||
1223 | |||
1224 | return ret; | ||
1225 | } | ||
1226 | |||
1202 | static int | 1227 | static int |
1203 | __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, | 1228 | __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, |
1204 | const struct file_operations *id, | 1229 | const struct file_operations *id, |
@@ -1209,19 +1234,9 @@ __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, | |||
1209 | struct dentry *d_events; | 1234 | struct dentry *d_events; |
1210 | int ret; | 1235 | int ret; |
1211 | 1236 | ||
1212 | /* The linker may leave blanks */ | 1237 | ret = event_init(call); |
1213 | if (!call->name) | 1238 | if (ret < 0) |
1214 | return -EINVAL; | 1239 | return ret; |
1215 | |||
1216 | if (call->class->raw_init) { | ||
1217 | ret = call->class->raw_init(call); | ||
1218 | if (ret < 0) { | ||
1219 | if (ret != -ENOSYS) | ||
1220 | pr_warning("Could not initialize trace events/%s\n", | ||
1221 | call->name); | ||
1222 | return ret; | ||
1223 | } | ||
1224 | } | ||
1225 | 1240 | ||
1226 | d_events = event_trace_events_dir(); | 1241 | d_events = event_trace_events_dir(); |
1227 | if (!d_events) | 1242 | if (!d_events) |
@@ -1272,13 +1287,10 @@ static void remove_subsystem_dir(const char *name) | |||
1272 | */ | 1287 | */ |
1273 | static void __trace_remove_event_call(struct ftrace_event_call *call) | 1288 | static void __trace_remove_event_call(struct ftrace_event_call *call) |
1274 | { | 1289 | { |
1275 | ftrace_event_enable_disable(call, 0); | 1290 | event_remove(call); |
1276 | if (call->event.funcs) | ||
1277 | __unregister_ftrace_event(&call->event); | ||
1278 | debugfs_remove_recursive(call->dir); | ||
1279 | list_del(&call->list); | ||
1280 | trace_destroy_fields(call); | 1291 | trace_destroy_fields(call); |
1281 | destroy_preds(call); | 1292 | destroy_preds(call); |
1293 | debugfs_remove_recursive(call->dir); | ||
1282 | remove_subsystem_dir(call->class->system); | 1294 | remove_subsystem_dir(call->class->system); |
1283 | } | 1295 | } |
1284 | 1296 | ||
@@ -1450,15 +1462,43 @@ static __init int setup_trace_event(char *str) | |||
1450 | } | 1462 | } |
1451 | __setup("trace_event=", setup_trace_event); | 1463 | __setup("trace_event=", setup_trace_event); |
1452 | 1464 | ||
1465 | static __init int event_trace_enable(void) | ||
1466 | { | ||
1467 | struct ftrace_event_call **iter, *call; | ||
1468 | char *buf = bootup_event_buf; | ||
1469 | char *token; | ||
1470 | int ret; | ||
1471 | |||
1472 | for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { | ||
1473 | |||
1474 | call = *iter; | ||
1475 | ret = event_init(call); | ||
1476 | if (!ret) | ||
1477 | list_add(&call->list, &ftrace_events); | ||
1478 | } | ||
1479 | |||
1480 | while (true) { | ||
1481 | token = strsep(&buf, ","); | ||
1482 | |||
1483 | if (!token) | ||
1484 | break; | ||
1485 | if (!*token) | ||
1486 | continue; | ||
1487 | |||
1488 | ret = ftrace_set_clr_event(token, 1); | ||
1489 | if (ret) | ||
1490 | pr_warn("Failed to enable trace event: %s\n", token); | ||
1491 | } | ||
1492 | return 0; | ||
1493 | } | ||
1494 | |||
1453 | static __init int event_trace_init(void) | 1495 | static __init int event_trace_init(void) |
1454 | { | 1496 | { |
1455 | struct ftrace_event_call **call; | 1497 | struct ftrace_event_call *call; |
1456 | struct dentry *d_tracer; | 1498 | struct dentry *d_tracer; |
1457 | struct dentry *entry; | 1499 | struct dentry *entry; |
1458 | struct dentry *d_events; | 1500 | struct dentry *d_events; |
1459 | int ret; | 1501 | int ret; |
1460 | char *buf = bootup_event_buf; | ||
1461 | char *token; | ||
1462 | 1502 | ||
1463 | d_tracer = tracing_init_dentry(); | 1503 | d_tracer = tracing_init_dentry(); |
1464 | if (!d_tracer) | 1504 | if (!d_tracer) |
@@ -1497,24 +1537,19 @@ static __init int event_trace_init(void) | |||
1497 | if (trace_define_common_fields()) | 1537 | if (trace_define_common_fields()) |
1498 | pr_warning("tracing: Failed to allocate common fields"); | 1538 | pr_warning("tracing: Failed to allocate common fields"); |
1499 | 1539 | ||
1500 | for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { | 1540 | /* |
1501 | __trace_add_event_call(*call, NULL, &ftrace_event_id_fops, | 1541 | * Early initialization already enabled ftrace event. |
1542 | * Now it's only necessary to create the event directory. | ||
1543 | */ | ||
1544 | list_for_each_entry(call, &ftrace_events, list) { | ||
1545 | |||
1546 | ret = event_create_dir(call, d_events, | ||
1547 | &ftrace_event_id_fops, | ||
1502 | &ftrace_enable_fops, | 1548 | &ftrace_enable_fops, |
1503 | &ftrace_event_filter_fops, | 1549 | &ftrace_event_filter_fops, |
1504 | &ftrace_event_format_fops); | 1550 | &ftrace_event_format_fops); |
1505 | } | 1551 | if (ret < 0) |
1506 | 1552 | event_remove(call); | |
1507 | while (true) { | ||
1508 | token = strsep(&buf, ","); | ||
1509 | |||
1510 | if (!token) | ||
1511 | break; | ||
1512 | if (!*token) | ||
1513 | continue; | ||
1514 | |||
1515 | ret = ftrace_set_clr_event(token, 1); | ||
1516 | if (ret) | ||
1517 | pr_warning("Failed to enable trace event: %s\n", token); | ||
1518 | } | 1553 | } |
1519 | 1554 | ||
1520 | ret = register_module_notifier(&trace_module_nb); | 1555 | ret = register_module_notifier(&trace_module_nb); |
@@ -1523,6 +1558,7 @@ static __init int event_trace_init(void) | |||
1523 | 1558 | ||
1524 | return 0; | 1559 | return 0; |
1525 | } | 1560 | } |
1561 | core_initcall(event_trace_enable); | ||
1526 | fs_initcall(event_trace_init); | 1562 | fs_initcall(event_trace_init); |
1527 | 1563 | ||
1528 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 1564 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
@@ -1646,9 +1682,11 @@ static __init void event_trace_self_tests(void) | |||
1646 | event_test_stuff(); | 1682 | event_test_stuff(); |
1647 | 1683 | ||
1648 | ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0); | 1684 | ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0); |
1649 | if (WARN_ON_ONCE(ret)) | 1685 | if (WARN_ON_ONCE(ret)) { |
1650 | pr_warning("error disabling system %s\n", | 1686 | pr_warning("error disabling system %s\n", |
1651 | system->name); | 1687 | system->name); |
1688 | continue; | ||
1689 | } | ||
1652 | 1690 | ||
1653 | pr_cont("OK\n"); | 1691 | pr_cont("OK\n"); |
1654 | } | 1692 | } |
@@ -1681,7 +1719,8 @@ static __init void event_trace_self_tests(void) | |||
1681 | static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); | 1719 | static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); |
1682 | 1720 | ||
1683 | static void | 1721 | static void |
1684 | function_test_events_call(unsigned long ip, unsigned long parent_ip) | 1722 | function_test_events_call(unsigned long ip, unsigned long parent_ip, |
1723 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
1685 | { | 1724 | { |
1686 | struct ring_buffer_event *event; | 1725 | struct ring_buffer_event *event; |
1687 | struct ring_buffer *buffer; | 1726 | struct ring_buffer *buffer; |
@@ -1720,6 +1759,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1720 | static struct ftrace_ops trace_ops __initdata = | 1759 | static struct ftrace_ops trace_ops __initdata = |
1721 | { | 1760 | { |
1722 | .func = function_test_events_call, | 1761 | .func = function_test_events_call, |
1762 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
1723 | }; | 1763 | }; |
1724 | 1764 | ||
1725 | static __init void event_trace_self_test_with_function(void) | 1765 | static __init void event_trace_self_test_with_function(void) |
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 431dba8b7542..c154797a7ff7 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -2002,7 +2002,7 @@ static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter, | |||
2002 | static int __ftrace_function_set_filter(int filter, char *buf, int len, | 2002 | static int __ftrace_function_set_filter(int filter, char *buf, int len, |
2003 | struct function_filter_data *data) | 2003 | struct function_filter_data *data) |
2004 | { | 2004 | { |
2005 | int i, re_cnt, ret; | 2005 | int i, re_cnt, ret = -EINVAL; |
2006 | int *reset; | 2006 | int *reset; |
2007 | char **re; | 2007 | char **re; |
2008 | 2008 | ||
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index a426f410c060..483162a9f908 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -49,7 +49,8 @@ static void function_trace_start(struct trace_array *tr) | |||
49 | } | 49 | } |
50 | 50 | ||
51 | static void | 51 | static void |
52 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | 52 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip, |
53 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
53 | { | 54 | { |
54 | struct trace_array *tr = func_trace; | 55 | struct trace_array *tr = func_trace; |
55 | struct trace_array_cpu *data; | 56 | struct trace_array_cpu *data; |
@@ -84,7 +85,9 @@ enum { | |||
84 | static struct tracer_flags func_flags; | 85 | static struct tracer_flags func_flags; |
85 | 86 | ||
86 | static void | 87 | static void |
87 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 88 | function_trace_call(unsigned long ip, unsigned long parent_ip, |
89 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
90 | |||
88 | { | 91 | { |
89 | struct trace_array *tr = func_trace; | 92 | struct trace_array *tr = func_trace; |
90 | struct trace_array_cpu *data; | 93 | struct trace_array_cpu *data; |
@@ -121,7 +124,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
121 | } | 124 | } |
122 | 125 | ||
123 | static void | 126 | static void |
124 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | 127 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip, |
128 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
125 | { | 129 | { |
126 | struct trace_array *tr = func_trace; | 130 | struct trace_array *tr = func_trace; |
127 | struct trace_array_cpu *data; | 131 | struct trace_array_cpu *data; |
@@ -164,13 +168,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
164 | static struct ftrace_ops trace_ops __read_mostly = | 168 | static struct ftrace_ops trace_ops __read_mostly = |
165 | { | 169 | { |
166 | .func = function_trace_call, | 170 | .func = function_trace_call, |
167 | .flags = FTRACE_OPS_FL_GLOBAL, | 171 | .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, |
168 | }; | 172 | }; |
169 | 173 | ||
170 | static struct ftrace_ops trace_stack_ops __read_mostly = | 174 | static struct ftrace_ops trace_stack_ops __read_mostly = |
171 | { | 175 | { |
172 | .func = function_stack_trace_call, | 176 | .func = function_stack_trace_call, |
173 | .flags = FTRACE_OPS_FL_GLOBAL, | 177 | .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, |
174 | }; | 178 | }; |
175 | 179 | ||
176 | static struct tracer_opt func_opts[] = { | 180 | static struct tracer_opt func_opts[] = { |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index ce27c8ba8d31..99b4378393d5 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -143,7 +143,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | |||
143 | return; | 143 | return; |
144 | } | 144 | } |
145 | 145 | ||
146 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | 146 | #if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY) |
147 | /* | 147 | /* |
148 | * The arch may choose to record the frame pointer used | 148 | * The arch may choose to record the frame pointer used |
149 | * and check it here to make sure that it is what we expect it | 149 | * and check it here to make sure that it is what we expect it |
@@ -154,6 +154,9 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | |||
154 | * | 154 | * |
155 | * Currently, x86_32 with optimize for size (-Os) makes the latest | 155 | * Currently, x86_32 with optimize for size (-Os) makes the latest |
156 | * gcc do the above. | 156 | * gcc do the above. |
157 | * | ||
158 | * Note, -mfentry does not use frame pointers, and this test | ||
159 | * is not needed if CC_USING_FENTRY is set. | ||
157 | */ | 160 | */ |
158 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { | 161 | if (unlikely(current->ret_stack[index].fp != frame_pointer)) { |
159 | ftrace_graph_stop(); | 162 | ftrace_graph_stop(); |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 99d20e920368..d98ee8283b29 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr, | |||
136 | * irqsoff uses its own tracer function to keep the overhead down: | 136 | * irqsoff uses its own tracer function to keep the overhead down: |
137 | */ | 137 | */ |
138 | static void | 138 | static void |
139 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | 139 | irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip, |
140 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
140 | { | 141 | { |
141 | struct trace_array *tr = irqsoff_trace; | 142 | struct trace_array *tr = irqsoff_trace; |
142 | struct trace_array_cpu *data; | 143 | struct trace_array_cpu *data; |
@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
153 | static struct ftrace_ops trace_ops __read_mostly = | 154 | static struct ftrace_ops trace_ops __read_mostly = |
154 | { | 155 | { |
155 | .func = irqsoff_tracer_call, | 156 | .func = irqsoff_tracer_call, |
156 | .flags = FTRACE_OPS_FL_GLOBAL, | 157 | .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, |
157 | }; | 158 | }; |
158 | #endif /* CONFIG_FUNCTION_TRACER */ | 159 | #endif /* CONFIG_FUNCTION_TRACER */ |
159 | 160 | ||
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index ff791ea48b57..02170c00c413 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -108,7 +108,8 @@ out_enable: | |||
108 | * wakeup uses its own tracer function to keep the overhead down: | 108 | * wakeup uses its own tracer function to keep the overhead down: |
109 | */ | 109 | */ |
110 | static void | 110 | static void |
111 | wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | 111 | wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, |
112 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
112 | { | 113 | { |
113 | struct trace_array *tr = wakeup_trace; | 114 | struct trace_array *tr = wakeup_trace; |
114 | struct trace_array_cpu *data; | 115 | struct trace_array_cpu *data; |
@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
129 | static struct ftrace_ops trace_ops __read_mostly = | 130 | static struct ftrace_ops trace_ops __read_mostly = |
130 | { | 131 | { |
131 | .func = wakeup_tracer_call, | 132 | .func = wakeup_tracer_call, |
132 | .flags = FTRACE_OPS_FL_GLOBAL, | 133 | .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, |
133 | }; | 134 | }; |
134 | #endif /* CONFIG_FUNCTION_TRACER */ | 135 | #endif /* CONFIG_FUNCTION_TRACER */ |
135 | 136 | ||
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 288541f977fb..2c00a691a540 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | |||
103 | 103 | ||
104 | static int trace_selftest_test_probe1_cnt; | 104 | static int trace_selftest_test_probe1_cnt; |
105 | static void trace_selftest_test_probe1_func(unsigned long ip, | 105 | static void trace_selftest_test_probe1_func(unsigned long ip, |
106 | unsigned long pip) | 106 | unsigned long pip, |
107 | struct ftrace_ops *op, | ||
108 | struct pt_regs *pt_regs) | ||
107 | { | 109 | { |
108 | trace_selftest_test_probe1_cnt++; | 110 | trace_selftest_test_probe1_cnt++; |
109 | } | 111 | } |
110 | 112 | ||
111 | static int trace_selftest_test_probe2_cnt; | 113 | static int trace_selftest_test_probe2_cnt; |
112 | static void trace_selftest_test_probe2_func(unsigned long ip, | 114 | static void trace_selftest_test_probe2_func(unsigned long ip, |
113 | unsigned long pip) | 115 | unsigned long pip, |
116 | struct ftrace_ops *op, | ||
117 | struct pt_regs *pt_regs) | ||
114 | { | 118 | { |
115 | trace_selftest_test_probe2_cnt++; | 119 | trace_selftest_test_probe2_cnt++; |
116 | } | 120 | } |
117 | 121 | ||
118 | static int trace_selftest_test_probe3_cnt; | 122 | static int trace_selftest_test_probe3_cnt; |
119 | static void trace_selftest_test_probe3_func(unsigned long ip, | 123 | static void trace_selftest_test_probe3_func(unsigned long ip, |
120 | unsigned long pip) | 124 | unsigned long pip, |
125 | struct ftrace_ops *op, | ||
126 | struct pt_regs *pt_regs) | ||
121 | { | 127 | { |
122 | trace_selftest_test_probe3_cnt++; | 128 | trace_selftest_test_probe3_cnt++; |
123 | } | 129 | } |
124 | 130 | ||
125 | static int trace_selftest_test_global_cnt; | 131 | static int trace_selftest_test_global_cnt; |
126 | static void trace_selftest_test_global_func(unsigned long ip, | 132 | static void trace_selftest_test_global_func(unsigned long ip, |
127 | unsigned long pip) | 133 | unsigned long pip, |
134 | struct ftrace_ops *op, | ||
135 | struct pt_regs *pt_regs) | ||
128 | { | 136 | { |
129 | trace_selftest_test_global_cnt++; | 137 | trace_selftest_test_global_cnt++; |
130 | } | 138 | } |
131 | 139 | ||
132 | static int trace_selftest_test_dyn_cnt; | 140 | static int trace_selftest_test_dyn_cnt; |
133 | static void trace_selftest_test_dyn_func(unsigned long ip, | 141 | static void trace_selftest_test_dyn_func(unsigned long ip, |
134 | unsigned long pip) | 142 | unsigned long pip, |
143 | struct ftrace_ops *op, | ||
144 | struct pt_regs *pt_regs) | ||
135 | { | 145 | { |
136 | trace_selftest_test_dyn_cnt++; | 146 | trace_selftest_test_dyn_cnt++; |
137 | } | 147 | } |
138 | 148 | ||
139 | static struct ftrace_ops test_probe1 = { | 149 | static struct ftrace_ops test_probe1 = { |
140 | .func = trace_selftest_test_probe1_func, | 150 | .func = trace_selftest_test_probe1_func, |
151 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
141 | }; | 152 | }; |
142 | 153 | ||
143 | static struct ftrace_ops test_probe2 = { | 154 | static struct ftrace_ops test_probe2 = { |
144 | .func = trace_selftest_test_probe2_func, | 155 | .func = trace_selftest_test_probe2_func, |
156 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
145 | }; | 157 | }; |
146 | 158 | ||
147 | static struct ftrace_ops test_probe3 = { | 159 | static struct ftrace_ops test_probe3 = { |
148 | .func = trace_selftest_test_probe3_func, | 160 | .func = trace_selftest_test_probe3_func, |
161 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
149 | }; | 162 | }; |
150 | 163 | ||
151 | static struct ftrace_ops test_global = { | 164 | static struct ftrace_ops test_global = { |
152 | .func = trace_selftest_test_global_func, | 165 | .func = trace_selftest_test_global_func, |
153 | .flags = FTRACE_OPS_FL_GLOBAL, | 166 | .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, |
154 | }; | 167 | }; |
155 | 168 | ||
156 | static void print_counts(void) | 169 | static void print_counts(void) |
@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
393 | 406 | ||
394 | return ret; | 407 | return ret; |
395 | } | 408 | } |
409 | |||
410 | static int trace_selftest_recursion_cnt; | ||
411 | static void trace_selftest_test_recursion_func(unsigned long ip, | ||
412 | unsigned long pip, | ||
413 | struct ftrace_ops *op, | ||
414 | struct pt_regs *pt_regs) | ||
415 | { | ||
416 | /* | ||
417 | * This function is registered without the recursion safe flag. | ||
418 | * The ftrace infrastructure should provide the recursion | ||
419 | * protection. If not, this will crash the kernel! | ||
420 | */ | ||
421 | trace_selftest_recursion_cnt++; | ||
422 | DYN_FTRACE_TEST_NAME(); | ||
423 | } | ||
424 | |||
425 | static void trace_selftest_test_recursion_safe_func(unsigned long ip, | ||
426 | unsigned long pip, | ||
427 | struct ftrace_ops *op, | ||
428 | struct pt_regs *pt_regs) | ||
429 | { | ||
430 | /* | ||
431 | * We said we would provide our own recursion. By calling | ||
432 | * this function again, we should recurse back into this function | ||
433 | * and count again. But this only happens if the arch supports | ||
434 | * all of ftrace features and nothing else is using the function | ||
435 | * tracing utility. | ||
436 | */ | ||
437 | if (trace_selftest_recursion_cnt++) | ||
438 | return; | ||
439 | DYN_FTRACE_TEST_NAME(); | ||
440 | } | ||
441 | |||
442 | static struct ftrace_ops test_rec_probe = { | ||
443 | .func = trace_selftest_test_recursion_func, | ||
444 | }; | ||
445 | |||
446 | static struct ftrace_ops test_recsafe_probe = { | ||
447 | .func = trace_selftest_test_recursion_safe_func, | ||
448 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
449 | }; | ||
450 | |||
451 | static int | ||
452 | trace_selftest_function_recursion(void) | ||
453 | { | ||
454 | int save_ftrace_enabled = ftrace_enabled; | ||
455 | int save_tracer_enabled = tracer_enabled; | ||
456 | char *func_name; | ||
457 | int len; | ||
458 | int ret; | ||
459 | int cnt; | ||
460 | |||
461 | /* The previous test PASSED */ | ||
462 | pr_cont("PASSED\n"); | ||
463 | pr_info("Testing ftrace recursion: "); | ||
464 | |||
465 | |||
466 | /* enable tracing, and record the filter function */ | ||
467 | ftrace_enabled = 1; | ||
468 | tracer_enabled = 1; | ||
469 | |||
470 | /* Handle PPC64 '.' name */ | ||
471 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | ||
472 | len = strlen(func_name); | ||
473 | |||
474 | ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1); | ||
475 | if (ret) { | ||
476 | pr_cont("*Could not set filter* "); | ||
477 | goto out; | ||
478 | } | ||
479 | |||
480 | ret = register_ftrace_function(&test_rec_probe); | ||
481 | if (ret) { | ||
482 | pr_cont("*could not register callback* "); | ||
483 | goto out; | ||
484 | } | ||
485 | |||
486 | DYN_FTRACE_TEST_NAME(); | ||
487 | |||
488 | unregister_ftrace_function(&test_rec_probe); | ||
489 | |||
490 | ret = -1; | ||
491 | if (trace_selftest_recursion_cnt != 1) { | ||
492 | pr_cont("*callback not called once (%d)* ", | ||
493 | trace_selftest_recursion_cnt); | ||
494 | goto out; | ||
495 | } | ||
496 | |||
497 | trace_selftest_recursion_cnt = 1; | ||
498 | |||
499 | pr_cont("PASSED\n"); | ||
500 | pr_info("Testing ftrace recursion safe: "); | ||
501 | |||
502 | ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1); | ||
503 | if (ret) { | ||
504 | pr_cont("*Could not set filter* "); | ||
505 | goto out; | ||
506 | } | ||
507 | |||
508 | ret = register_ftrace_function(&test_recsafe_probe); | ||
509 | if (ret) { | ||
510 | pr_cont("*could not register callback* "); | ||
511 | goto out; | ||
512 | } | ||
513 | |||
514 | DYN_FTRACE_TEST_NAME(); | ||
515 | |||
516 | unregister_ftrace_function(&test_recsafe_probe); | ||
517 | |||
518 | /* | ||
519 | * If arch supports all ftrace features, and no other task | ||
520 | * was on the list, we should be fine. | ||
521 | */ | ||
522 | if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC) | ||
523 | cnt = 2; /* Should have recursed */ | ||
524 | else | ||
525 | cnt = 1; | ||
526 | |||
527 | ret = -1; | ||
528 | if (trace_selftest_recursion_cnt != cnt) { | ||
529 | pr_cont("*callback not called expected %d times (%d)* ", | ||
530 | cnt, trace_selftest_recursion_cnt); | ||
531 | goto out; | ||
532 | } | ||
533 | |||
534 | ret = 0; | ||
535 | out: | ||
536 | ftrace_enabled = save_ftrace_enabled; | ||
537 | tracer_enabled = save_tracer_enabled; | ||
538 | |||
539 | return ret; | ||
540 | } | ||
396 | #else | 541 | #else |
397 | # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) | 542 | # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) |
543 | # define trace_selftest_function_recursion() ({ 0; }) | ||
398 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 544 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
399 | 545 | ||
546 | static enum { | ||
547 | TRACE_SELFTEST_REGS_START, | ||
548 | TRACE_SELFTEST_REGS_FOUND, | ||
549 | TRACE_SELFTEST_REGS_NOT_FOUND, | ||
550 | } trace_selftest_regs_stat; | ||
551 | |||
552 | static void trace_selftest_test_regs_func(unsigned long ip, | ||
553 | unsigned long pip, | ||
554 | struct ftrace_ops *op, | ||
555 | struct pt_regs *pt_regs) | ||
556 | { | ||
557 | if (pt_regs) | ||
558 | trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND; | ||
559 | else | ||
560 | trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND; | ||
561 | } | ||
562 | |||
563 | static struct ftrace_ops test_regs_probe = { | ||
564 | .func = trace_selftest_test_regs_func, | ||
565 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS, | ||
566 | }; | ||
567 | |||
568 | static int | ||
569 | trace_selftest_function_regs(void) | ||
570 | { | ||
571 | int save_ftrace_enabled = ftrace_enabled; | ||
572 | int save_tracer_enabled = tracer_enabled; | ||
573 | char *func_name; | ||
574 | int len; | ||
575 | int ret; | ||
576 | int supported = 0; | ||
577 | |||
578 | #ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS | ||
579 | supported = 1; | ||
580 | #endif | ||
581 | |||
582 | /* The previous test PASSED */ | ||
583 | pr_cont("PASSED\n"); | ||
584 | pr_info("Testing ftrace regs%s: ", | ||
585 | !supported ? "(no arch support)" : ""); | ||
586 | |||
587 | /* enable tracing, and record the filter function */ | ||
588 | ftrace_enabled = 1; | ||
589 | tracer_enabled = 1; | ||
590 | |||
591 | /* Handle PPC64 '.' name */ | ||
592 | func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); | ||
593 | len = strlen(func_name); | ||
594 | |||
595 | ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1); | ||
596 | /* | ||
597 | * If DYNAMIC_FTRACE is not set, then we just trace all functions. | ||
598 | * This test really doesn't care. | ||
599 | */ | ||
600 | if (ret && ret != -ENODEV) { | ||
601 | pr_cont("*Could not set filter* "); | ||
602 | goto out; | ||
603 | } | ||
604 | |||
605 | ret = register_ftrace_function(&test_regs_probe); | ||
606 | /* | ||
607 | * Now if the arch does not support passing regs, then this should | ||
608 | * have failed. | ||
609 | */ | ||
610 | if (!supported) { | ||
611 | if (!ret) { | ||
612 | pr_cont("*registered save-regs without arch support* "); | ||
613 | goto out; | ||
614 | } | ||
615 | test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED; | ||
616 | ret = register_ftrace_function(&test_regs_probe); | ||
617 | } | ||
618 | if (ret) { | ||
619 | pr_cont("*could not register callback* "); | ||
620 | goto out; | ||
621 | } | ||
622 | |||
623 | |||
624 | DYN_FTRACE_TEST_NAME(); | ||
625 | |||
626 | unregister_ftrace_function(&test_regs_probe); | ||
627 | |||
628 | ret = -1; | ||
629 | |||
630 | switch (trace_selftest_regs_stat) { | ||
631 | case TRACE_SELFTEST_REGS_START: | ||
632 | pr_cont("*callback never called* "); | ||
633 | goto out; | ||
634 | |||
635 | case TRACE_SELFTEST_REGS_FOUND: | ||
636 | if (supported) | ||
637 | break; | ||
638 | pr_cont("*callback received regs without arch support* "); | ||
639 | goto out; | ||
640 | |||
641 | case TRACE_SELFTEST_REGS_NOT_FOUND: | ||
642 | if (!supported) | ||
643 | break; | ||
644 | pr_cont("*callback received NULL regs* "); | ||
645 | goto out; | ||
646 | } | ||
647 | |||
648 | ret = 0; | ||
649 | out: | ||
650 | ftrace_enabled = save_ftrace_enabled; | ||
651 | tracer_enabled = save_tracer_enabled; | ||
652 | |||
653 | return ret; | ||
654 | } | ||
655 | |||
400 | /* | 656 | /* |
401 | * Simple verification test of ftrace function tracer. | 657 | * Simple verification test of ftrace function tracer. |
402 | * Enable ftrace, sleep 1/10 second, and then read the trace | 658 | * Enable ftrace, sleep 1/10 second, and then read the trace |
@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
442 | 698 | ||
443 | ret = trace_selftest_startup_dynamic_tracing(trace, tr, | 699 | ret = trace_selftest_startup_dynamic_tracing(trace, tr, |
444 | DYN_FTRACE_TEST_NAME); | 700 | DYN_FTRACE_TEST_NAME); |
701 | if (ret) | ||
702 | goto out; | ||
445 | 703 | ||
704 | ret = trace_selftest_function_recursion(); | ||
705 | if (ret) | ||
706 | goto out; | ||
707 | |||
708 | ret = trace_selftest_function_regs(); | ||
446 | out: | 709 | out: |
447 | ftrace_enabled = save_ftrace_enabled; | 710 | ftrace_enabled = save_ftrace_enabled; |
448 | tracer_enabled = save_tracer_enabled; | 711 | tracer_enabled = save_tracer_enabled; |
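
trace_selftest_startup_function() above now also runs the new recursion and saved-regs selftests. The regs test registers an ftrace_ops with FTRACE_OPS_FL_SAVE_REGS and, when the architecture cannot honour it, retries after OR-ing in FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED. A hedged sketch of that fallback from an ordinary caller's side (the my_* names are invented for illustration):

        #include <linux/ftrace.h>
        #include <linux/ptrace.h>

        static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *op, struct pt_regs *regs)
        {
                /* regs can still be NULL when only the "if supported" variant
                 * is in effect and the arch cannot save registers here. */
                if (regs)
                        trace_printk("ip=%lx pc=%lx\n",
                                     ip, instruction_pointer(regs));
        }

        static struct ftrace_ops my_regs_ops = {
                .func   = my_regs_callback,
                .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
        };

        static int my_regs_register(void)
        {
                int ret = register_ftrace_function(&my_regs_ops);

                if (ret) {
                        /* SAVE_REGS was refused (no arch support): mirror the
                         * selftest and retry with "if supported", accepting
                         * that the callback will then see regs == NULL. */
                        my_regs_ops.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                        ret = register_ftrace_function(&my_regs_ops);
                }
                return ret;
        }
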
@@ -778,6 +1041,8 @@ static int trace_wakeup_test_thread(void *data) | |||
778 | set_current_state(TASK_INTERRUPTIBLE); | 1041 | set_current_state(TASK_INTERRUPTIBLE); |
779 | schedule(); | 1042 | schedule(); |
780 | 1043 | ||
1044 | complete(x); | ||
1045 | |||
781 | /* we are awake, now wait to disappear */ | 1046 | /* we are awake, now wait to disappear */ |
782 | while (!kthread_should_stop()) { | 1047 | while (!kthread_should_stop()) { |
783 | /* | 1048 | /* |
@@ -821,24 +1086,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
821 | /* reset the max latency */ | 1086 | /* reset the max latency */ |
822 | tracing_max_latency = 0; | 1087 | tracing_max_latency = 0; |
823 | 1088 | ||
824 | /* sleep to let the RT thread sleep too */ | 1089 | while (p->on_rq) { |
825 | msleep(100); | 1090 | /* |
1091 | * Sleep to make sure the RT thread is asleep too. | ||
1092 | * On virtual machines we can't rely on timings, | ||
1093 | * but we want to make sure this test still works. | ||
1094 | */ | ||
1095 | msleep(100); | ||
1096 | } | ||
826 | 1097 | ||
827 | /* | 1098 | init_completion(&isrt); |
828 | * Yes this is slightly racy. It is possible that for some | ||
829 | * strange reason that the RT thread we created, did not | ||
830 | * call schedule for 100ms after doing the completion, | ||
831 | * and we do a wakeup on a task that already is awake. | ||
832 | * But that is extremely unlikely, and the worst thing that | ||
833 | * happens in such a case, is that we disable tracing. | ||
834 | * Honestly, if this race does happen something is horrible | ||
835 | * wrong with the system. | ||
836 | */ | ||
837 | 1099 | ||
838 | wake_up_process(p); | 1100 | wake_up_process(p); |
839 | 1101 | ||
840 | /* give a little time to let the thread wake up */ | 1102 | /* Wait for the task to wake up */ |
841 | msleep(100); | 1103 | wait_for_completion(&isrt); |
842 | 1104 | ||
843 | /* stop the tracing. */ | 1105 | /* stop the tracing. */ |
844 | tracing_stop(); | 1106 | tracing_stop(); |
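
The wakeup selftest above stops relying on fixed msleep() delays: it polls p->on_rq until the RT thread is genuinely off the runqueue, then waits on a completion that the thread signals once it has actually woken, which keeps the test meaningful on slow or virtualized hosts. A stripped-down sketch of that handshake with hypothetical names, assuming the thread was created elsewhere with kthread_create():

        #include <linux/completion.h>
        #include <linux/delay.h>
        #include <linux/kthread.h>
        #include <linux/sched.h>

        static DECLARE_COMPLETION(thread_awake);

        static int my_rt_thread(void *data)
        {
                /* go to sleep and wait for the test to wake us */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /* tell the waker we really ran, instead of letting it guess */
                complete(&thread_awake);

                while (!kthread_should_stop())
                        schedule_timeout_interruptible(HZ / 10);
                return 0;
        }

        static void my_wakeup_test(struct task_struct *p)
        {
                /* don't trust a fixed delay: wait until p is off the runqueue */
                while (p->on_rq)
                        msleep(100);

                init_completion(&thread_awake);         /* reset for this round */
                wake_up_process(p);
                wait_for_completion(&thread_awake);     /* thread confirmed wakeup */
        }
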
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index d4545f49242e..0c1b165778e5 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -111,7 +111,8 @@ static inline void check_stack(void) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static void | 113 | static void |
114 | stack_trace_call(unsigned long ip, unsigned long parent_ip) | 114 | stack_trace_call(unsigned long ip, unsigned long parent_ip, |
115 | struct ftrace_ops *op, struct pt_regs *pt_regs) | ||
115 | { | 116 | { |
116 | int cpu; | 117 | int cpu; |
117 | 118 | ||
@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
136 | static struct ftrace_ops trace_ops __read_mostly = | 137 | static struct ftrace_ops trace_ops __read_mostly = |
137 | { | 138 | { |
138 | .func = stack_trace_call, | 139 | .func = stack_trace_call, |
140 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | ||
139 | }; | 141 | }; |
140 | 142 | ||
141 | static ssize_t | 143 | static ssize_t |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 60e4d7875672..2485a7d09b11 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -487,7 +487,7 @@ int __init init_ftrace_syscalls(void) | |||
487 | 487 | ||
488 | return 0; | 488 | return 0; |
489 | } | 489 | } |
490 | core_initcall(init_ftrace_syscalls); | 490 | early_initcall(init_ftrace_syscalls); |
491 | 491 | ||
492 | #ifdef CONFIG_PERF_EVENTS | 492 | #ifdef CONFIG_PERF_EVENTS |
493 | 493 | ||
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
506 | int size; | 506 | int size; |
507 | 507 | ||
508 | syscall_nr = syscall_get_nr(current, regs); | 508 | syscall_nr = syscall_get_nr(current, regs); |
509 | if (syscall_nr < 0) | ||
510 | return; | ||
509 | if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) | 511 | if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) |
510 | return; | 512 | return; |
511 | 513 | ||
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
580 | int size; | 582 | int size; |
581 | 583 | ||
582 | syscall_nr = syscall_get_nr(current, regs); | 584 | syscall_nr = syscall_get_nr(current, regs); |
585 | if (syscall_nr < 0) | ||
586 | return; | ||
583 | if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) | 587 | if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) |
584 | return; | 588 | return; |
585 | 589 | ||
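
The two trace_syscalls.c hunks add a guard because syscall_get_nr() returns -1 when the task is not inside a system call (interrupt, exception, kernel thread); using that value as a bit index into the enabled bitmaps would read out of bounds. The same defensive shape in a hypothetical handler:

        #include <asm/syscall.h>
        #include <linux/sched.h>

        /* Hypothetical handler showing the guard added above. */
        static void my_syscall_handler(struct pt_regs *regs)
        {
                int syscall_nr = syscall_get_nr(current, regs);

                /* -1 means "not in a syscall"; never use it as an index into
                 * per-syscall bitmaps or metadata tables. */
                if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                        return;

                /* safe to test enabled bitmaps / look up syscall metadata here */
        }
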
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 4b1dfba70f7c..9d4c8d5a1f53 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/notifier.h> | 22 | #include <linux/notifier.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/sysctl.h> | 24 | #include <linux/sysctl.h> |
25 | #include <linux/smpboot.h> | ||
25 | 26 | ||
26 | #include <asm/irq_regs.h> | 27 | #include <asm/irq_regs.h> |
27 | #include <linux/kvm_para.h> | 28 | #include <linux/kvm_para.h> |
@@ -29,16 +30,18 @@ | |||
29 | 30 | ||
30 | int watchdog_enabled = 1; | 31 | int watchdog_enabled = 1; |
31 | int __read_mostly watchdog_thresh = 10; | 32 | int __read_mostly watchdog_thresh = 10; |
33 | static int __read_mostly watchdog_disabled; | ||
32 | 34 | ||
33 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); | 35 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); |
34 | static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); | 36 | static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); |
35 | static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); | 37 | static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); |
36 | static DEFINE_PER_CPU(bool, softlockup_touch_sync); | 38 | static DEFINE_PER_CPU(bool, softlockup_touch_sync); |
37 | static DEFINE_PER_CPU(bool, soft_watchdog_warn); | 39 | static DEFINE_PER_CPU(bool, soft_watchdog_warn); |
40 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); | ||
41 | static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); | ||
38 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 42 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
39 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); | 43 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); |
40 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); | 44 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); |
41 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); | ||
42 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | 45 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); |
43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | 46 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
44 | #endif | 47 | #endif |
@@ -248,13 +251,15 @@ static void watchdog_overflow_callback(struct perf_event *event, | |||
248 | __this_cpu_write(hard_watchdog_warn, false); | 251 | __this_cpu_write(hard_watchdog_warn, false); |
249 | return; | 252 | return; |
250 | } | 253 | } |
254 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ | ||
255 | |||
251 | static void watchdog_interrupt_count(void) | 256 | static void watchdog_interrupt_count(void) |
252 | { | 257 | { |
253 | __this_cpu_inc(hrtimer_interrupts); | 258 | __this_cpu_inc(hrtimer_interrupts); |
254 | } | 259 | } |
255 | #else | 260 | |
256 | static inline void watchdog_interrupt_count(void) { return; } | 261 | static int watchdog_nmi_enable(unsigned int cpu); |
257 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ | 262 | static void watchdog_nmi_disable(unsigned int cpu); |
258 | 263 | ||
259 | /* watchdog kicker functions */ | 264 | /* watchdog kicker functions */ |
260 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | 265 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) |
@@ -327,49 +332,68 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
327 | return HRTIMER_RESTART; | 332 | return HRTIMER_RESTART; |
328 | } | 333 | } |
329 | 334 | ||
335 | static void watchdog_set_prio(unsigned int policy, unsigned int prio) | ||
336 | { | ||
337 | struct sched_param param = { .sched_priority = prio }; | ||
330 | 338 | ||
331 | /* | 339 | sched_setscheduler(current, policy, ¶m); |
332 | * The watchdog thread - touches the timestamp. | 340 | } |
333 | */ | 341 | |
334 | static int watchdog(void *unused) | 342 | static void watchdog_enable(unsigned int cpu) |
335 | { | 343 | { |
336 | struct sched_param param = { .sched_priority = 0 }; | ||
337 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); | 344 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); |
338 | 345 | ||
339 | /* initialize timestamp */ | 346 | if (!watchdog_enabled) { |
340 | __touch_watchdog(); | 347 | kthread_park(current); |
348 | return; | ||
349 | } | ||
350 | |||
351 | /* Enable the perf event */ | ||
352 | watchdog_nmi_enable(cpu); | ||
341 | 353 | ||
342 | /* kick off the timer for the hardlockup detector */ | 354 | /* kick off the timer for the hardlockup detector */ |
355 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
356 | hrtimer->function = watchdog_timer_fn; | ||
357 | |||
343 | /* done here because hrtimer_start can only pin to smp_processor_id() */ | 358 | /* done here because hrtimer_start can only pin to smp_processor_id() */ |
344 | hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()), | 359 | hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()), |
345 | HRTIMER_MODE_REL_PINNED); | 360 | HRTIMER_MODE_REL_PINNED); |
346 | 361 | ||
347 | set_current_state(TASK_INTERRUPTIBLE); | 362 | /* initialize timestamp */ |
348 | /* | 363 | watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); |
349 | * Run briefly (kicked by the hrtimer callback function) once every | 364 | __touch_watchdog(); |
350 | * get_sample_period() seconds (4 seconds by default) to reset the | 365 | } |
351 | * softlockup timestamp. If this gets delayed for more than | ||
352 | * 2*watchdog_thresh seconds then the debug-printout triggers in | ||
353 | * watchdog_timer_fn(). | ||
354 | */ | ||
355 | while (!kthread_should_stop()) { | ||
356 | __touch_watchdog(); | ||
357 | schedule(); | ||
358 | 366 | ||
359 | if (kthread_should_stop()) | 367 | static void watchdog_disable(unsigned int cpu) |
360 | break; | 368 | { |
369 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); | ||
361 | 370 | ||
362 | set_current_state(TASK_INTERRUPTIBLE); | 371 | watchdog_set_prio(SCHED_NORMAL, 0); |
363 | } | 372 | hrtimer_cancel(hrtimer); |
364 | /* | 373 | /* disable the perf event */ |
365 | * Drop the policy/priority elevation during thread exit to avoid a | 374 | watchdog_nmi_disable(cpu); |
366 | * scheduling latency spike. | ||
367 | */ | ||
368 | __set_current_state(TASK_RUNNING); | ||
369 | sched_setscheduler(current, SCHED_NORMAL, ¶m); | ||
370 | return 0; | ||
371 | } | 375 | } |
372 | 376 | ||
377 | static int watchdog_should_run(unsigned int cpu) | ||
378 | { | ||
379 | return __this_cpu_read(hrtimer_interrupts) != | ||
380 | __this_cpu_read(soft_lockup_hrtimer_cnt); | ||
381 | } | ||
382 | |||
383 | /* | ||
384 | * The watchdog thread function - touches the timestamp. | ||
385 | * | ||
386 | * It only runs once every get_sample_period() seconds (4 seconds by | ||
387 | * default) to reset the softlockup timestamp. If this gets delayed | ||
388 | * for more than 2*watchdog_thresh seconds then the debug-printout | ||
389 | * triggers in watchdog_timer_fn(). | ||
390 | */ | ||
391 | static void watchdog(unsigned int cpu) | ||
392 | { | ||
393 | __this_cpu_write(soft_lockup_hrtimer_cnt, | ||
394 | __this_cpu_read(hrtimer_interrupts)); | ||
395 | __touch_watchdog(); | ||
396 | } | ||
373 | 397 | ||
374 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 398 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
375 | /* | 399 | /* |
@@ -379,7 +403,7 @@ static int watchdog(void *unused) | |||
379 | */ | 403 | */ |
380 | static unsigned long cpu0_err; | 404 | static unsigned long cpu0_err; |
381 | 405 | ||
382 | static int watchdog_nmi_enable(int cpu) | 406 | static int watchdog_nmi_enable(unsigned int cpu) |
383 | { | 407 | { |
384 | struct perf_event_attr *wd_attr; | 408 | struct perf_event_attr *wd_attr; |
385 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | 409 | struct perf_event *event = per_cpu(watchdog_ev, cpu); |
@@ -433,7 +457,7 @@ out: | |||
433 | return 0; | 457 | return 0; |
434 | } | 458 | } |
435 | 459 | ||
436 | static void watchdog_nmi_disable(int cpu) | 460 | static void watchdog_nmi_disable(unsigned int cpu) |
437 | { | 461 | { |
438 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | 462 | struct perf_event *event = per_cpu(watchdog_ev, cpu); |
439 | 463 | ||
@@ -447,107 +471,35 @@ static void watchdog_nmi_disable(int cpu) | |||
447 | return; | 471 | return; |
448 | } | 472 | } |
449 | #else | 473 | #else |
450 | static int watchdog_nmi_enable(int cpu) { return 0; } | 474 | static int watchdog_nmi_enable(unsigned int cpu) { return 0; } |
451 | static void watchdog_nmi_disable(int cpu) { return; } | 475 | static void watchdog_nmi_disable(unsigned int cpu) { return; } |
452 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ | 476 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ |
453 | 477 | ||
454 | /* prepare/enable/disable routines */ | 478 | /* prepare/enable/disable routines */ |
455 | static void watchdog_prepare_cpu(int cpu) | ||
456 | { | ||
457 | struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); | ||
458 | |||
459 | WARN_ON(per_cpu(softlockup_watchdog, cpu)); | ||
460 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
461 | hrtimer->function = watchdog_timer_fn; | ||
462 | } | ||
463 | |||
464 | static int watchdog_enable(int cpu) | ||
465 | { | ||
466 | struct task_struct *p = per_cpu(softlockup_watchdog, cpu); | ||
467 | int err = 0; | ||
468 | |||
469 | /* enable the perf event */ | ||
470 | err = watchdog_nmi_enable(cpu); | ||
471 | |||
472 | /* Regardless of err above, fall through and start softlockup */ | ||
473 | |||
474 | /* create the watchdog thread */ | ||
475 | if (!p) { | ||
476 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | ||
477 | p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu); | ||
478 | if (IS_ERR(p)) { | ||
479 | pr_err("softlockup watchdog for %i failed\n", cpu); | ||
480 | if (!err) { | ||
481 | /* if hardlockup hasn't already set this */ | ||
482 | err = PTR_ERR(p); | ||
483 | /* and disable the perf event */ | ||
484 | watchdog_nmi_disable(cpu); | ||
485 | } | ||
486 | goto out; | ||
487 | } | ||
488 | sched_setscheduler(p, SCHED_FIFO, ¶m); | ||
489 | kthread_bind(p, cpu); | ||
490 | per_cpu(watchdog_touch_ts, cpu) = 0; | ||
491 | per_cpu(softlockup_watchdog, cpu) = p; | ||
492 | wake_up_process(p); | ||
493 | } | ||
494 | |||
495 | out: | ||
496 | return err; | ||
497 | } | ||
498 | |||
499 | static void watchdog_disable(int cpu) | ||
500 | { | ||
501 | struct task_struct *p = per_cpu(softlockup_watchdog, cpu); | ||
502 | struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); | ||
503 | |||
504 | /* | ||
505 | * cancel the timer first to stop incrementing the stats | ||
506 | * and waking up the kthread | ||
507 | */ | ||
508 | hrtimer_cancel(hrtimer); | ||
509 | |||
510 | /* disable the perf event */ | ||
511 | watchdog_nmi_disable(cpu); | ||
512 | |||
513 | /* stop the watchdog thread */ | ||
514 | if (p) { | ||
515 | per_cpu(softlockup_watchdog, cpu) = NULL; | ||
516 | kthread_stop(p); | ||
517 | } | ||
518 | } | ||
519 | |||
520 | /* sysctl functions */ | 479 | /* sysctl functions */ |
521 | #ifdef CONFIG_SYSCTL | 480 | #ifdef CONFIG_SYSCTL |
522 | static void watchdog_enable_all_cpus(void) | 481 | static void watchdog_enable_all_cpus(void) |
523 | { | 482 | { |
524 | int cpu; | 483 | unsigned int cpu; |
525 | |||
526 | watchdog_enabled = 0; | ||
527 | |||
528 | for_each_online_cpu(cpu) | ||
529 | if (!watchdog_enable(cpu)) | ||
530 | /* if any cpu succeeds, watchdog is considered | ||
531 | enabled for the system */ | ||
532 | watchdog_enabled = 1; | ||
533 | |||
534 | if (!watchdog_enabled) | ||
535 | pr_err("failed to be enabled on some cpus\n"); | ||
536 | 484 | ||
485 | if (watchdog_disabled) { | ||
486 | watchdog_disabled = 0; | ||
487 | for_each_online_cpu(cpu) | ||
488 | kthread_unpark(per_cpu(softlockup_watchdog, cpu)); | ||
489 | } | ||
537 | } | 490 | } |
538 | 491 | ||
539 | static void watchdog_disable_all_cpus(void) | 492 | static void watchdog_disable_all_cpus(void) |
540 | { | 493 | { |
541 | int cpu; | 494 | unsigned int cpu; |
542 | |||
543 | for_each_online_cpu(cpu) | ||
544 | watchdog_disable(cpu); | ||
545 | 495 | ||
546 | /* if all watchdogs are disabled, then they are disabled for the system */ | 496 | if (!watchdog_disabled) { |
547 | watchdog_enabled = 0; | 497 | watchdog_disabled = 1; |
498 | for_each_online_cpu(cpu) | ||
499 | kthread_park(per_cpu(softlockup_watchdog, cpu)); | ||
500 | } | ||
548 | } | 501 | } |
549 | 502 | ||
550 | |||
551 | /* | 503 | /* |
552 | * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh | 504 | * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh |
553 | */ | 505 | */ |
@@ -557,73 +509,36 @@ int proc_dowatchdog(struct ctl_table *table, int write, | |||
557 | { | 509 | { |
558 | int ret; | 510 | int ret; |
559 | 511 | ||
512 | if (watchdog_disabled < 0) | ||
513 | return -ENODEV; | ||
514 | |||
560 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 515 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
561 | if (ret || !write) | 516 | if (ret || !write) |
562 | goto out; | 517 | return ret; |
563 | 518 | ||
564 | if (watchdog_enabled && watchdog_thresh) | 519 | if (watchdog_enabled && watchdog_thresh) |
565 | watchdog_enable_all_cpus(); | 520 | watchdog_enable_all_cpus(); |
566 | else | 521 | else |
567 | watchdog_disable_all_cpus(); | 522 | watchdog_disable_all_cpus(); |
568 | 523 | ||
569 | out: | ||
570 | return ret; | 524 | return ret; |
571 | } | 525 | } |
572 | #endif /* CONFIG_SYSCTL */ | 526 | #endif /* CONFIG_SYSCTL */ |
573 | 527 | ||
574 | 528 | static struct smp_hotplug_thread watchdog_threads = { | |
575 | /* | 529 | .store = &softlockup_watchdog, |
576 | * Create/destroy watchdog threads as CPUs come and go: | 530 | .thread_should_run = watchdog_should_run, |
577 | */ | 531 | .thread_fn = watchdog, |
578 | static int __cpuinit | 532 | .thread_comm = "watchdog/%u", |
579 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 533 | .setup = watchdog_enable, |
580 | { | 534 | .park = watchdog_disable, |
581 | int hotcpu = (unsigned long)hcpu; | 535 | .unpark = watchdog_enable, |
582 | |||
583 | switch (action) { | ||
584 | case CPU_UP_PREPARE: | ||
585 | case CPU_UP_PREPARE_FROZEN: | ||
586 | watchdog_prepare_cpu(hotcpu); | ||
587 | break; | ||
588 | case CPU_ONLINE: | ||
589 | case CPU_ONLINE_FROZEN: | ||
590 | if (watchdog_enabled) | ||
591 | watchdog_enable(hotcpu); | ||
592 | break; | ||
593 | #ifdef CONFIG_HOTPLUG_CPU | ||
594 | case CPU_UP_CANCELED: | ||
595 | case CPU_UP_CANCELED_FROZEN: | ||
596 | watchdog_disable(hotcpu); | ||
597 | break; | ||
598 | case CPU_DEAD: | ||
599 | case CPU_DEAD_FROZEN: | ||
600 | watchdog_disable(hotcpu); | ||
601 | break; | ||
602 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
603 | } | ||
604 | |||
605 | /* | ||
606 | * hardlockup and softlockup are not important enough | ||
607 | * to block cpu bring up. Just always succeed and | ||
608 | * rely on printk output to flag problems. | ||
609 | */ | ||
610 | return NOTIFY_OK; | ||
611 | } | ||
612 | |||
613 | static struct notifier_block __cpuinitdata cpu_nfb = { | ||
614 | .notifier_call = cpu_callback | ||
615 | }; | 536 | }; |
616 | 537 | ||
617 | void __init lockup_detector_init(void) | 538 | void __init lockup_detector_init(void) |
618 | { | 539 | { |
619 | void *cpu = (void *)(long)smp_processor_id(); | 540 | if (smpboot_register_percpu_thread(&watchdog_threads)) { |
620 | int err; | 541 | pr_err("Failed to create watchdog threads, disabled\n"); |
621 | 542 | watchdog_disabled = -ENODEV; | |
622 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 543 | } |
623 | WARN_ON(notifier_to_errno(err)); | ||
624 | |||
625 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | ||
626 | register_cpu_notifier(&cpu_nfb); | ||
627 | |||
628 | return; | ||
629 | } | 544 | } |
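
The watchdog.c rewrite above drops the hand-rolled CPU-hotplug notifier and per-cpu kthread bookkeeping in favour of the smpboot per-cpu thread infrastructure: the driver fills in a struct smp_hotplug_thread and smpboot_register_percpu_thread() takes care of creating, binding, parking and unparking one thread per CPU. A hedged sketch of the same pattern for an unrelated, hypothetical per-cpu thread (all my_* names are made up):

        #include <linux/percpu.h>
        #include <linux/smpboot.h>

        static DEFINE_PER_CPU(struct task_struct *, my_percpu_task);
        static DEFINE_PER_CPU(unsigned long, my_events);
        static DEFINE_PER_CPU(unsigned long, my_events_seen);

        static int my_thread_should_run(unsigned int cpu)
        {
                /* run only when new events arrived since the last pass */
                return __this_cpu_read(my_events) != __this_cpu_read(my_events_seen);
        }

        static void my_thread_fn(unsigned int cpu)
        {
                __this_cpu_write(my_events_seen, __this_cpu_read(my_events));
                /* do the per-cpu work here */
        }

        static void my_thread_setup(unsigned int cpu)
        {
                /* bring up per-cpu state; also called on unpark */
        }

        static void my_thread_park(unsigned int cpu)
        {
                /* quiesce per-cpu state before the CPU goes down */
        }

        static struct smp_hotplug_thread my_threads = {
                .store                  = &my_percpu_task,
                .thread_should_run      = my_thread_should_run,
                .thread_fn              = my_thread_fn,
                .thread_comm            = "mythread/%u",
                .setup                  = my_thread_setup,
                .park                   = my_thread_park,
                .unpark                 = my_thread_setup,
        };

        static int __init my_threads_init(void)
        {
                /* one call replaces the old CPU notifier plus kthread bookkeeping */
                return smpboot_register_percpu_thread(&my_threads);
        }

The park/unpark callbacks map onto watchdog_disable()/watchdog_enable() above, which is also why watchdog_enable_all_cpus() and watchdog_disable_all_cpus() shrink to simple kthread_unpark()/kthread_park() loops.
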
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 692d97628a10..3c5a79e2134c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -66,6 +66,7 @@ enum { | |||
66 | 66 | ||
67 | /* pool flags */ | 67 | /* pool flags */ |
68 | POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ | 68 | POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ |
69 | POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */ | ||
69 | 70 | ||
70 | /* worker flags */ | 71 | /* worker flags */ |
71 | WORKER_STARTED = 1 << 0, /* started */ | 72 | WORKER_STARTED = 1 << 0, /* started */ |
@@ -652,7 +653,7 @@ static bool need_to_manage_workers(struct worker_pool *pool) | |||
652 | /* Do we have too many workers and should some go away? */ | 653 | /* Do we have too many workers and should some go away? */ |
653 | static bool too_many_workers(struct worker_pool *pool) | 654 | static bool too_many_workers(struct worker_pool *pool) |
654 | { | 655 | { |
655 | bool managing = mutex_is_locked(&pool->manager_mutex); | 656 | bool managing = pool->flags & POOL_MANAGING_WORKERS; |
656 | int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ | 657 | int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ |
657 | int nr_busy = pool->nr_workers - nr_idle; | 658 | int nr_busy = pool->nr_workers - nr_idle; |
658 | 659 | ||
@@ -1326,6 +1327,15 @@ static void idle_worker_rebind(struct worker *worker) | |||
1326 | 1327 | ||
1327 | /* we did our part, wait for rebind_workers() to finish up */ | 1328 | /* we did our part, wait for rebind_workers() to finish up */ |
1328 | wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); | 1329 | wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); |
1330 | |||
1331 | /* | ||
1332 | * rebind_workers() shouldn't finish until all workers passed the | ||
1333 | * above WORKER_REBIND wait. Tell it when done. | ||
1334 | */ | ||
1335 | spin_lock_irq(&worker->pool->gcwq->lock); | ||
1336 | if (!--worker->idle_rebind->cnt) | ||
1337 | complete(&worker->idle_rebind->done); | ||
1338 | spin_unlock_irq(&worker->pool->gcwq->lock); | ||
1329 | } | 1339 | } |
1330 | 1340 | ||
1331 | /* | 1341 | /* |
@@ -1339,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work) | |||
1339 | struct worker *worker = container_of(work, struct worker, rebind_work); | 1349 | struct worker *worker = container_of(work, struct worker, rebind_work); |
1340 | struct global_cwq *gcwq = worker->pool->gcwq; | 1350 | struct global_cwq *gcwq = worker->pool->gcwq; |
1341 | 1351 | ||
1342 | if (worker_maybe_bind_and_lock(worker)) | 1352 | worker_maybe_bind_and_lock(worker); |
1343 | worker_clr_flags(worker, WORKER_REBIND); | 1353 | |
1354 | /* | ||
1355 | * %WORKER_REBIND must be cleared even if the above binding failed; | ||
1356 | * otherwise, we may confuse the next CPU_UP cycle or oops / get | ||
1357 | * stuck by calling idle_worker_rebind() prematurely. If CPU went | ||
1358 | * down again inbetween, %WORKER_UNBOUND would be set, so clearing | ||
1359 | * %WORKER_REBIND is always safe. | ||
1360 | */ | ||
1361 | worker_clr_flags(worker, WORKER_REBIND); | ||
1344 | 1362 | ||
1345 | spin_unlock_irq(&gcwq->lock); | 1363 | spin_unlock_irq(&gcwq->lock); |
1346 | } | 1364 | } |
@@ -1396,12 +1414,15 @@ retry: | |||
1396 | /* set REBIND and kick idle ones, we'll wait for these later */ | 1414 | /* set REBIND and kick idle ones, we'll wait for these later */ |
1397 | for_each_worker_pool(pool, gcwq) { | 1415 | for_each_worker_pool(pool, gcwq) { |
1398 | list_for_each_entry(worker, &pool->idle_list, entry) { | 1416 | list_for_each_entry(worker, &pool->idle_list, entry) { |
1417 | unsigned long worker_flags = worker->flags; | ||
1418 | |||
1399 | if (worker->flags & WORKER_REBIND) | 1419 | if (worker->flags & WORKER_REBIND) |
1400 | continue; | 1420 | continue; |
1401 | 1421 | ||
1402 | /* morph UNBOUND to REBIND */ | 1422 | /* morph UNBOUND to REBIND atomically */ |
1403 | worker->flags &= ~WORKER_UNBOUND; | 1423 | worker_flags &= ~WORKER_UNBOUND; |
1404 | worker->flags |= WORKER_REBIND; | 1424 | worker_flags |= WORKER_REBIND; |
1425 | ACCESS_ONCE(worker->flags) = worker_flags; | ||
1405 | 1426 | ||
1406 | idle_rebind.cnt++; | 1427 | idle_rebind.cnt++; |
1407 | worker->idle_rebind = &idle_rebind; | 1428 | worker->idle_rebind = &idle_rebind; |
@@ -1419,25 +1440,15 @@ retry: | |||
1419 | goto retry; | 1440 | goto retry; |
1420 | } | 1441 | } |
1421 | 1442 | ||
1422 | /* | 1443 | /* all idle workers are rebound, rebind busy workers */ |
1423 | * All idle workers are rebound and waiting for %WORKER_REBIND to | ||
1424 | * be cleared inside idle_worker_rebind(). Clear and release. | ||
1425 | * Clearing %WORKER_REBIND from this foreign context is safe | ||
1426 | * because these workers are still guaranteed to be idle. | ||
1427 | */ | ||
1428 | for_each_worker_pool(pool, gcwq) | ||
1429 | list_for_each_entry(worker, &pool->idle_list, entry) | ||
1430 | worker->flags &= ~WORKER_REBIND; | ||
1431 | |||
1432 | wake_up_all(&gcwq->rebind_hold); | ||
1433 | |||
1434 | /* rebind busy workers */ | ||
1435 | for_each_busy_worker(worker, i, pos, gcwq) { | 1444 | for_each_busy_worker(worker, i, pos, gcwq) { |
1436 | struct work_struct *rebind_work = &worker->rebind_work; | 1445 | struct work_struct *rebind_work = &worker->rebind_work; |
1446 | unsigned long worker_flags = worker->flags; | ||
1437 | 1447 | ||
1438 | /* morph UNBOUND to REBIND */ | 1448 | /* morph UNBOUND to REBIND atomically */ |
1439 | worker->flags &= ~WORKER_UNBOUND; | 1449 | worker_flags &= ~WORKER_UNBOUND; |
1440 | worker->flags |= WORKER_REBIND; | 1450 | worker_flags |= WORKER_REBIND; |
1451 | ACCESS_ONCE(worker->flags) = worker_flags; | ||
1441 | 1452 | ||
1442 | if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, | 1453 | if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, |
1443 | work_data_bits(rebind_work))) | 1454 | work_data_bits(rebind_work))) |
@@ -1449,6 +1460,34 @@ retry: | |||
1449 | worker->scheduled.next, | 1460 | worker->scheduled.next, |
1450 | work_color_to_flags(WORK_NO_COLOR)); | 1461 | work_color_to_flags(WORK_NO_COLOR)); |
1451 | } | 1462 | } |
1463 | |||
1464 | /* | ||
1465 | * All idle workers are rebound and waiting for %WORKER_REBIND to | ||
1466 | * be cleared inside idle_worker_rebind(). Clear and release. | ||
1467 | * Clearing %WORKER_REBIND from this foreign context is safe | ||
1468 | * because these workers are still guaranteed to be idle. | ||
1469 | * | ||
1470 | * We need to make sure all idle workers passed WORKER_REBIND wait | ||
1471 | * in idle_worker_rebind() before returning; otherwise, workers can | ||
1472 | * get stuck at the wait if hotplug cycle repeats. | ||
1473 | */ | ||
1474 | idle_rebind.cnt = 1; | ||
1475 | INIT_COMPLETION(idle_rebind.done); | ||
1476 | |||
1477 | for_each_worker_pool(pool, gcwq) { | ||
1478 | list_for_each_entry(worker, &pool->idle_list, entry) { | ||
1479 | worker->flags &= ~WORKER_REBIND; | ||
1480 | idle_rebind.cnt++; | ||
1481 | } | ||
1482 | } | ||
1483 | |||
1484 | wake_up_all(&gcwq->rebind_hold); | ||
1485 | |||
1486 | if (--idle_rebind.cnt) { | ||
1487 | spin_unlock_irq(&gcwq->lock); | ||
1488 | wait_for_completion(&idle_rebind.done); | ||
1489 | spin_lock_irq(&gcwq->lock); | ||
1490 | } | ||
1452 | } | 1491 | } |
1453 | 1492 | ||
1454 | static struct worker *alloc_worker(void) | 1493 | static struct worker *alloc_worker(void) |
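
In the two rebind loops above, the UNBOUND-to-REBIND morph is now computed in a local variable and published with a single ACCESS_ONCE() store rather than two separate read-modify-writes on worker->flags, so nothing observes the half-morphed value; the idle workers additionally report back through idle_rebind.done so rebind_workers() cannot return while one of them is still parked at the WORKER_REBIND wait. Schematically (illustrative contrast only, not patch code; worker and WORKER_* stand in for the real symbols):

        /* Before: two visible read-modify-writes.  Anyone peeking at
         * worker->flags in between sees UNBOUND already cleared but
         * REBIND not yet set, and the compiler may split or re-load them. */
        worker->flags &= ~WORKER_UNBOUND;
        worker->flags |= WORKER_REBIND;

        /* After: build the final value locally and publish it once. */
        {
                unsigned long worker_flags = worker->flags;

                worker_flags &= ~WORKER_UNBOUND;
                worker_flags |= WORKER_REBIND;
                ACCESS_ONCE(worker->flags) = worker_flags;
        }
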
@@ -1794,9 +1833,45 @@ static bool manage_workers(struct worker *worker) | |||
1794 | struct worker_pool *pool = worker->pool; | 1833 | struct worker_pool *pool = worker->pool; |
1795 | bool ret = false; | 1834 | bool ret = false; |
1796 | 1835 | ||
1797 | if (!mutex_trylock(&pool->manager_mutex)) | 1836 | if (pool->flags & POOL_MANAGING_WORKERS) |
1798 | return ret; | 1837 | return ret; |
1799 | 1838 | ||
1839 | pool->flags |= POOL_MANAGING_WORKERS; | ||
1840 | |||
1841 | /* | ||
1842 | * To simplify both worker management and CPU hotplug, hold off | ||
1843 | * management while hotplug is in progress. CPU hotplug path can't | ||
1844 | * grab %POOL_MANAGING_WORKERS to achieve this because that can | ||
1845 | * lead to idle worker depletion (all become busy thinking someone | ||
1846 | * else is managing) which in turn can result in deadlock under | ||
1847 | * extreme circumstances. Use @pool->manager_mutex to synchronize | ||
1848 | * manager against CPU hotplug. | ||
1849 | * | ||
1850 | * manager_mutex would always be free unless CPU hotplug is in | ||
1851 | * progress. trylock first without dropping @gcwq->lock. | ||
1852 | */ | ||
1853 | if (unlikely(!mutex_trylock(&pool->manager_mutex))) { | ||
1854 | spin_unlock_irq(&pool->gcwq->lock); | ||
1855 | mutex_lock(&pool->manager_mutex); | ||
1856 | /* | ||
1857 | * CPU hotplug could have happened while we were waiting | ||
1858 | * for manager_mutex. Hotplug itself can't handle us | ||
1859 | * because manager isn't either on idle or busy list, and | ||
1860 | * @gcwq's state and ours could have deviated. | ||
1861 | * | ||
1862 | * As hotplug is now excluded via manager_mutex, we can | ||
1863 | * simply try to bind. It will succeed or fail depending | ||
1864 | * on @gcwq's current state. Try it and adjust | ||
1865 | * %WORKER_UNBOUND accordingly. | ||
1866 | */ | ||
1867 | if (worker_maybe_bind_and_lock(worker)) | ||
1868 | worker->flags &= ~WORKER_UNBOUND; | ||
1869 | else | ||
1870 | worker->flags |= WORKER_UNBOUND; | ||
1871 | |||
1872 | ret = true; | ||
1873 | } | ||
1874 | |||
1800 | pool->flags &= ~POOL_MANAGE_WORKERS; | 1875 | pool->flags &= ~POOL_MANAGE_WORKERS; |
1801 | 1876 | ||
1802 | /* | 1877 | /* |
@@ -1806,6 +1881,7 @@ static bool manage_workers(struct worker *worker) | |||
1806 | ret |= maybe_destroy_workers(pool); | 1881 | ret |= maybe_destroy_workers(pool); |
1807 | ret |= maybe_create_worker(pool); | 1882 | ret |= maybe_create_worker(pool); |
1808 | 1883 | ||
1884 | pool->flags &= ~POOL_MANAGING_WORKERS; | ||
1809 | mutex_unlock(&pool->manager_mutex); | 1885 | mutex_unlock(&pool->manager_mutex); |
1810 | return ret; | 1886 | return ret; |
1811 | } | 1887 | } |
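
manage_workers() above now advertises an active manager with the POOL_MANAGING_WORKERS flag and keeps pool->manager_mutex solely for excluding CPU hotplug, taking it with a trylock first and only dropping the gcwq spinlock to sleep on it when hotplug actually holds it, then re-validating the worker's bound state. The generic shape of that trylock-then-sleep dance, with hypothetical lock names:

        #include <linux/kernel.h>
        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(outer_lock);     /* stands in for gcwq->lock */
        static DEFINE_MUTEX(slow_path_mutex);   /* stands in for pool->manager_mutex */

        static void locked_slow_path(void)
        {
                spin_lock_irq(&outer_lock);

                if (unlikely(!mutex_trylock(&slow_path_mutex))) {
                        /* Can't sleep while holding the spinlock: drop it,
                         * take the mutex the slow way, then reacquire and
                         * re-validate whatever the spinlock was protecting. */
                        spin_unlock_irq(&outer_lock);
                        mutex_lock(&slow_path_mutex);
                        spin_lock_irq(&outer_lock);
                        /* state may have changed while we slept: re-check here */
                }

                /* ... work that needs both locks ... */

                mutex_unlock(&slow_path_mutex);
                spin_unlock_irq(&outer_lock);
        }
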
@@ -3500,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, | |||
3500 | #ifdef CONFIG_SMP | 3576 | #ifdef CONFIG_SMP |
3501 | 3577 | ||
3502 | struct work_for_cpu { | 3578 | struct work_for_cpu { |
3503 | struct completion completion; | 3579 | struct work_struct work; |
3504 | long (*fn)(void *); | 3580 | long (*fn)(void *); |
3505 | void *arg; | 3581 | void *arg; |
3506 | long ret; | 3582 | long ret; |
3507 | }; | 3583 | }; |
3508 | 3584 | ||
3509 | static int do_work_for_cpu(void *_wfc) | 3585 | static void work_for_cpu_fn(struct work_struct *work) |
3510 | { | 3586 | { |
3511 | struct work_for_cpu *wfc = _wfc; | 3587 | struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); |
3588 | |||
3512 | wfc->ret = wfc->fn(wfc->arg); | 3589 | wfc->ret = wfc->fn(wfc->arg); |
3513 | complete(&wfc->completion); | ||
3514 | return 0; | ||
3515 | } | 3590 | } |
3516 | 3591 | ||
3517 | /** | 3592 | /** |
@@ -3526,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc) | |||
3526 | */ | 3601 | */ |
3527 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 3602 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) |
3528 | { | 3603 | { |
3529 | struct task_struct *sub_thread; | 3604 | struct work_for_cpu wfc = { .fn = fn, .arg = arg }; |
3530 | struct work_for_cpu wfc = { | ||
3531 | .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), | ||
3532 | .fn = fn, | ||
3533 | .arg = arg, | ||
3534 | }; | ||
3535 | 3605 | ||
3536 | sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); | 3606 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); |
3537 | if (IS_ERR(sub_thread)) | 3607 | schedule_work_on(cpu, &wfc.work); |
3538 | return PTR_ERR(sub_thread); | 3608 | flush_work(&wfc.work); |
3539 | kthread_bind(sub_thread, cpu); | ||
3540 | wake_up_process(sub_thread); | ||
3541 | wait_for_completion(&wfc.completion); | ||
3542 | return wfc.ret; | 3609 | return wfc.ret; |
3543 | } | 3610 | } |
3544 | EXPORT_SYMBOL_GPL(work_on_cpu); | 3611 | EXPORT_SYMBOL_GPL(work_on_cpu); |
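
Finally, work_on_cpu() is reimplemented on top of an on-stack work item: INIT_WORK_ONSTACK() plus schedule_work_on() runs the callback on the requested CPU, and flush_work() doubles as the wait for completion, so no dedicated kthread has to be created and bound per call. A small hedged usage sketch (callback and caller names are hypothetical):

        #include <linux/workqueue.h>

        /* Hypothetical callback with the fn/arg signature work_on_cpu() expects. */
        static long my_percpu_query(void *arg)
        {
                /* runs in process context on the CPU handed to work_on_cpu() */
                return 0;
        }

        static long query_cpu(unsigned int cpu)
        {
                /* With the rewritten implementation this queues an on-stack
                 * work item via schedule_work_on() and waits for it with
                 * flush_work(); the caller still sees a synchronous call. */
                return work_on_cpu(cpu, my_percpu_query, NULL);
        }
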