path: root/kernel
author     David S. Miller <davem@davemloft.net>  2014-05-24 00:32:30 -0400
committer  David S. Miller <davem@davemloft.net>  2014-05-24 00:32:30 -0400
commit     54e5c4def0614ab540fbdf68e45342a4af141702 (patch)
tree       95a2f61c72336932e83d9e4180cd9739106d624b /kernel
parent     be65de7174123e02477bd488db1a657caf0f9947 (diff)
parent     1ee1ceafb572f1a925809168267a7962a4289de8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/bonding/bond_alb.c
	drivers/net/ethernet/altera/altera_msgdma.c
	drivers/net/ethernet/altera/altera_sgdma.c
	net/ipv6/xfrm6_output.c

Several cases of overlapping changes.

The xfrm6_output.c has a bug fix which overlaps the renaming
of skb->local_df to skb->ignore_df.

In the Altera TSE driver cases, the register access cleanups
in net-next overlapped with bug fixes done in net.

Similarly a bug fix to send ALB packets in the bonding driver
using the right source address overlaps with cleanups in
net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            |  10
-rw-r--r--  kernel/cgroup_freezer.c    | 116
-rw-r--r--  kernel/context_tracking.c  |   2
-rw-r--r--  kernel/events/core.c       | 174
-rw-r--r--  kernel/hrtimer.c           |   8
-rw-r--r--  kernel/locking/lockdep.c   |   2
-rw-r--r--  kernel/power/snapshot.c    |   2
-rw-r--r--  kernel/printk/printk.c     |   4
-rw-r--r--  kernel/sched/core.c        |  25
-rw-r--r--  kernel/sched/cpudeadline.c |   4
-rw-r--r--  kernel/sched/cpupri.c      |   3
-rw-r--r--  kernel/sched/cputime.c     |  32
-rw-r--r--  kernel/sched/deadline.c    |   5
-rw-r--r--  kernel/sched/fair.c        |  16
-rw-r--r--  kernel/softirq.c           |   4
-rw-r--r--  kernel/tracepoint.c        |   4
-rw-r--r--  kernel/workqueue.c         |  36
17 files changed, 236 insertions, 211 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9fcdaa705b6c..3f1ca934a237 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
 	.refcount = ATOMIC_INIT(1),
 	.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
 	.tasks = LIST_HEAD_INIT(init_css_set.tasks),
@@ -1495,7 +1495,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 	 */
 	if (!use_task_css_set_links)
 		cgroup_enable_task_cg_lists();
-retry:
+
 	mutex_lock(&cgroup_tree_mutex);
 	mutex_lock(&cgroup_mutex);
 
@@ -1503,7 +1503,7 @@ retry:
 	ret = parse_cgroupfs_options(data, &opts);
 	if (ret)
 		goto out_unlock;
-
+retry:
 	/* look for a matching existing root */
 	if (!opts.subsys_mask && !opts.none && !opts.name) {
 		cgrp_dfl_root_visible = true;
@@ -1562,9 +1562,9 @@ retry:
 	if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
 		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&cgroup_tree_mutex);
-		kfree(opts.release_agent);
-		kfree(opts.name);
 		msleep(10);
+		mutex_lock(&cgroup_tree_mutex);
+		mutex_lock(&cgroup_mutex);
 		goto retry;
 	}
 
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 2bc4a2256444..345628c78b5b 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 
 /*
  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
 struct freezer {
 	struct cgroup_subsys_state css;
 	unsigned int state;
-	spinlock_t lock;
 };
 
+static DEFINE_MUTEX(freezer_mutex);
+
 static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (!freezer)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&freezer->lock);
 	return &freezer->css;
 }
 
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
 	struct freezer *freezer = css_freezer(css);
 	struct freezer *parent = parent_freezer(freezer);
 
-	/*
-	 * The following double locking and freezing state inheritance
-	 * guarantee that @cgroup can never escape ancestors' freezing
-	 * states.  See css_for_each_descendant_pre() for details.
-	 */
-	if (parent)
-		spin_lock_irq(&parent->lock);
-	spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&freezer_mutex);
 
 	freezer->state |= CGROUP_FREEZER_ONLINE;
 
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
 		atomic_inc(&system_freezing_cnt);
 	}
 
-	spin_unlock(&freezer->lock);
-	if (parent)
-		spin_unlock_irq(&parent->lock);
-
+	mutex_unlock(&freezer_mutex);
 	return 0;
 }
 
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
 {
 	struct freezer *freezer = css_freezer(css);
 
-	spin_lock_irq(&freezer->lock);
+	mutex_lock(&freezer_mutex);
 
 	if (freezer->state & CGROUP_FREEZING)
 		atomic_dec(&system_freezing_cnt);
 
 	freezer->state = 0;
 
-	spin_unlock_irq(&freezer->lock);
+	mutex_unlock(&freezer_mutex);
 }
 
 static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	struct task_struct *task;
 	bool clear_frozen = false;
 
-	spin_lock_irq(&freezer->lock);
+	mutex_lock(&freezer_mutex);
 
 	/*
 	 * Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 		}
 	}
 
-	spin_unlock_irq(&freezer->lock);
-
-	/*
-	 * Propagate FROZEN clearing upwards.  We may race with
-	 * update_if_frozen(), but as long as both work bottom-up, either
-	 * update_if_frozen() sees child's FROZEN cleared or we clear the
-	 * parent's FROZEN later.  No parent w/ !FROZEN children can be
-	 * left FROZEN.
-	 */
+	/* propagate FROZEN clearing upwards */
 	while (clear_frozen && (freezer = parent_freezer(freezer))) {
-		spin_lock_irq(&freezer->lock);
 		freezer->state &= ~CGROUP_FROZEN;
 		clear_frozen = freezer->state & CGROUP_FREEZING;
-		spin_unlock_irq(&freezer->lock);
 	}
+
+	mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
 {
 	struct freezer *freezer;
 
-	rcu_read_lock();
-	freezer = task_freezer(task);
-
 	/*
 	 * The root cgroup is non-freezable, so we can skip locking the
 	 * freezer.  This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
 	 * to do.  If we lost and root is the new cgroup, noop is still the
 	 * right thing to do.
 	 */
-	if (!parent_freezer(freezer))
-		goto out;
+	if (task_css_is_root(task, freezer_cgrp_id))
+		return;
 
-	/*
-	 * Grab @freezer->lock and freeze @task after verifying @task still
-	 * belongs to @freezer and it's freezing.  The former is for the
-	 * case where we have raced against task migration and lost and
-	 * @task is already in a different cgroup which may not be frozen.
-	 * This isn't strictly necessary as freeze_task() is allowed to be
-	 * called spuriously but let's do it anyway for, if nothing else,
-	 * documentation.
-	 */
-	spin_lock_irq(&freezer->lock);
-	if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
+	mutex_lock(&freezer_mutex);
+	rcu_read_lock();
+
+	freezer = task_freezer(task);
+	if (freezer->state & CGROUP_FREEZING)
 		freeze_task(task);
-	spin_unlock_irq(&freezer->lock);
-out:
+
 	rcu_read_unlock();
+	mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 	struct css_task_iter it;
 	struct task_struct *task;
 
-	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	spin_lock_irq(&freezer->lock);
+	lockdep_assert_held(&freezer_mutex);
 
 	if (!(freezer->state & CGROUP_FREEZING) ||
 	    (freezer->state & CGROUP_FROZEN))
-		goto out_unlock;
+		return;
 
 	/* are all (live) children frozen? */
+	rcu_read_lock();
 	css_for_each_child(pos, css) {
 		struct freezer *child = css_freezer(pos);
 
 		if ((child->state & CGROUP_FREEZER_ONLINE) &&
-		    !(child->state & CGROUP_FROZEN))
-			goto out_unlock;
+		    !(child->state & CGROUP_FROZEN)) {
+			rcu_read_unlock();
+			return;
+		}
 	}
+	rcu_read_unlock();
 
 	/* are all tasks frozen? */
 	css_task_iter_start(css, &it);
@@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 	freezer->state |= CGROUP_FROZEN;
 out_iter_end:
 	css_task_iter_end(&it);
-out_unlock:
-	spin_unlock_irq(&freezer->lock);
 }
 
 static int freezer_read(struct seq_file *m, void *v)
 {
 	struct cgroup_subsys_state *css = seq_css(m), *pos;
 
+	mutex_lock(&freezer_mutex);
 	rcu_read_lock();
 
 	/* update states bottom-up */
-	css_for_each_descendant_post(pos, css)
+	css_for_each_descendant_post(pos, css) {
+		if (!css_tryget(pos))
+			continue;
+		rcu_read_unlock();
+
 		update_if_frozen(pos);
 
+		rcu_read_lock();
+		css_put(pos);
+	}
+
 	rcu_read_unlock();
+	mutex_unlock(&freezer_mutex);
 
 	seq_puts(m, freezer_state_strs(css_freezer(css)->state));
 	seq_putc(m, '\n');
@@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
 				unsigned int state)
 {
 	/* also synchronizes against task migration, see freezer_attach() */
-	lockdep_assert_held(&freezer->lock);
+	lockdep_assert_held(&freezer_mutex);
 
 	if (!(freezer->state & CGROUP_FREEZER_ONLINE))
 		return;
@@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 	 * descendant will try to inherit its parent's FREEZING state as
 	 * CGROUP_FREEZING_PARENT.
 	 */
+	mutex_lock(&freezer_mutex);
 	rcu_read_lock();
 	css_for_each_descendant_pre(pos, &freezer->css) {
 		struct freezer *pos_f = css_freezer(pos);
 		struct freezer *parent = parent_freezer(pos_f);
 
-		spin_lock_irq(&pos_f->lock);
+		if (!css_tryget(pos))
+			continue;
+		rcu_read_unlock();
 
-		if (pos_f == freezer) {
+		if (pos_f == freezer)
 			freezer_apply_state(pos_f, freeze,
 					    CGROUP_FREEZING_SELF);
-		} else {
-			/*
-			 * Our update to @parent->state is already visible
-			 * which is all we need.  No need to lock @parent.
-			 * For more info on synchronization, see
-			 * freezer_post_create().
-			 */
+		else
 			freezer_apply_state(pos_f,
 					    parent->state & CGROUP_FREEZING,
 					    CGROUP_FREEZING_PARENT);
-		}
 
-		spin_unlock_irq(&pos_f->lock);
+		rcu_read_lock();
+		css_put(pos);
 	}
 	rcu_read_unlock();
+	mutex_unlock(&freezer_mutex);
 }
 
 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 6cb20d2e7ee0..019d45008448 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 {
 	enum ctx_state prev_ctx;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f83a71a3e46d..440eefc67397 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
 	cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+	struct perf_event *event;
+	bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-	struct perf_event *event = info;
+	struct remove_event *re = info;
+	struct perf_event *event = re->event;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
 	raw_spin_lock(&ctx->lock);
 	event_sched_out(event, cpuctx, ctx);
+	if (re->detach_group)
+		perf_group_detach(event);
 	list_del_event(event, ctx);
 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
 		ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
+	struct remove_event re = {
+		.event = event,
+		.detach_group = detach_group,
+	};
 
 	lockdep_assert_held(&ctx->mutex);
 
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
 	 * Per cpu events are removed via an smp call and
 	 * the removal is always successful.
 	 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, event);
+		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
 	}
 
 retry:
-	if (!task_function_call(task, __perf_remove_from_context, event))
+	if (!task_function_call(task, __perf_remove_from_context, &re))
 		return;
 
 	raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
 	 * Since the task isn't running, its safe to remove the event, us
 	 * holding the ctx->lock ensures the task won't get scheduled in.
 	 */
+	if (detach_group)
+		perf_group_detach(event);
 	list_del_event(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
 	unaccount_event(event);
 
 	if (event->rb) {
-		struct ring_buffer *rb;
-
 		/*
 		 * Can happen when we close an event with re-directed output.
 		 *
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
 		 * over us; possibly making our ring_buffer_put() the last.
 		 */
 		mutex_lock(&event->mmap_mutex);
-		rb = event->rb;
-		if (rb) {
-			rcu_assign_pointer(event->rb, NULL);
-			ring_buffer_detach(event, rb);
-			ring_buffer_put(rb); /* could be last */
-		}
+		ring_buffer_attach(event, NULL);
 		mutex_unlock(&event->mmap_mutex);
 	}
 
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
 	 * to trigger the AB-BA case.
 	 */
 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-	raw_spin_lock_irq(&ctx->lock);
-	perf_group_detach(event);
-	raw_spin_unlock_irq(&ctx->lock);
-	perf_remove_from_context(event);
+	perf_remove_from_context(event, true);
 	mutex_unlock(&ctx->mutex);
 
 	free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
 			       struct ring_buffer *rb)
 {
+	struct ring_buffer *old_rb = NULL;
 	unsigned long flags;
 
-	if (!list_empty(&event->rb_entry))
-		return;
+	if (event->rb) {
+		/*
+		 * Should be impossible, we set this when removing
+		 * event->rb_entry and wait/clear when adding event->rb_entry.
+		 */
+		WARN_ON_ONCE(event->rcu_pending);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	if (list_empty(&event->rb_entry))
-		list_add(&event->rb_entry, &rb->event_list);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+		old_rb = event->rb;
+		event->rcu_batches = get_state_synchronize_rcu();
+		event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-	unsigned long flags;
+		spin_lock_irqsave(&old_rb->event_lock, flags);
+		list_del_rcu(&event->rb_entry);
+		spin_unlock_irqrestore(&old_rb->event_lock, flags);
+	}
 
-	if (list_empty(&event->rb_entry))
-		return;
+	if (event->rcu_pending && rb) {
+		cond_synchronize_rcu(event->rcu_batches);
+		event->rcu_pending = 0;
+	}
+
+	if (rb) {
+		spin_lock_irqsave(&rb->event_lock, flags);
+		list_add_rcu(&event->rb_entry, &rb->event_list);
+		spin_unlock_irqrestore(&rb->event_lock, flags);
+	}
+
+	rcu_assign_pointer(event->rb, rb);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_del_init(&event->rb_entry);
-	wake_up_all(&event->waitq);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached before setting the new rb, so that we
+		 * could attach the new rb, we could have missed a wakeup.
+		 * Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
 
-	struct ring_buffer *rb = event->rb;
+	struct ring_buffer *rb = ring_buffer_get(event);
 	struct user_struct *mmap_user = rb->mmap_user;
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-		return;
+		goto out_put;
 
-	/* Detach current event from the buffer. */
-	rcu_assign_pointer(event->rb, NULL);
-	ring_buffer_detach(event, rb);
+	ring_buffer_attach(event, NULL);
 	mutex_unlock(&event->mmap_mutex);
 
 	/* If there's still other mmap()s of this buffer, we're done. */
-	if (atomic_read(&rb->mmap_count)) {
-		ring_buffer_put(rb); /* can't be last */
-		return;
-	}
+	if (atomic_read(&rb->mmap_count))
+		goto out_put;
 
 	/*
 	 * No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
 		 * still restart the iteration to make sure we're not now
 		 * iterating the wrong list.
 		 */
-		if (event->rb == rb) {
-			rcu_assign_pointer(event->rb, NULL);
-			ring_buffer_detach(event, rb);
-			ring_buffer_put(rb); /* can't be last, we still have one */
-		}
+		if (event->rb == rb)
+			ring_buffer_attach(event, NULL);
+
 		mutex_unlock(&event->mmap_mutex);
 		put_event(event);
 
@@ -4007,6 +4025,7 @@ again:
 	vma->vm_mm->pinned_vm -= mmap_locked;
 	free_uid(mmap_user);
 
+out_put:
 	ring_buffer_put(rb); /* could be last */
 }
 
@@ -4124,7 +4143,6 @@ again:
 	vma->vm_mm->pinned_vm += extra;
 
 	ring_buffer_attach(event, rb);
-	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_init_userpage(event);
 	perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
 
 	/* Recursion avoidance in each contexts */
 	int recursion[PERF_NR_CONTEXTS];
+
+	/* Keeps track of cpu being initialized/exited */
+	bool online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	hwc->state = !(flags & PERF_EF_START);
 
 	head = find_swevent_head(swhash, event);
-	if (WARN_ON_ONCE(!head))
+	if (!head) {
+		/*
+		 * We can race with cpu hotplug code. Do not
+		 * WARN if the cpu just got unplugged.
+		 */
+		WARN_ON_ONCE(swhash->online);
 		return -EINVAL;
+	}
 
 	hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6914,7 +6941,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-	struct ring_buffer *rb = NULL, *old_rb = NULL;
+	struct ring_buffer *rb = NULL;
 	int ret = -EINVAL;
 
 	if (!output_event)
@@ -6942,8 +6969,6 @@ set:
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
 
-	old_rb = event->rb;
-
 	if (output_event) {
 		/* get the rb we want to redirect to */
 		rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
 			goto unlock;
 	}
 
-	if (old_rb)
-		ring_buffer_detach(event, old_rb);
-
-	if (rb)
-		ring_buffer_attach(event, rb);
-
-	rcu_assign_pointer(event->rb, rb);
-
-	if (old_rb) {
-		ring_buffer_put(old_rb);
-		/*
-		 * Since we detached before setting the new rb, so that we
-		 * could attach the new rb, we could have missed a wakeup.
-		 * Provide it now.
-		 */
-		wake_up_all(&event->waitq);
-	}
+	ring_buffer_attach(event, rb);
 
 	ret = 0;
 unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (attr.freq) {
 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
 			return -EINVAL;
+	} else {
+		if (attr.sample_period & (1ULL << 63))
+			return -EINVAL;
 	}
 
 	/*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		struct perf_event_context *gctx = group_leader->ctx;
 
 		mutex_lock(&gctx->mutex);
-		perf_remove_from_context(group_leader);
+		perf_remove_from_context(group_leader, false);
 
 		/*
 		 * Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
-			perf_remove_from_context(sibling);
+			perf_remove_from_context(sibling, false);
 			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 	mutex_lock(&src_ctx->mutex);
 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
 				 event_entry) {
-		perf_remove_from_context(event);
+		perf_remove_from_context(event, false);
 		unaccount_event_cpu(event, src_cpu);
 		put_ctx(src_ctx);
 		list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 			 struct perf_event_context *child_ctx,
 			 struct task_struct *child)
 {
-	if (child_event->parent) {
-		raw_spin_lock_irq(&child_ctx->lock);
-		perf_group_detach(child_event);
-		raw_spin_unlock_irq(&child_ctx->lock);
-	}
-
-	perf_remove_from_context(child_event);
+	perf_remove_from_context(child_event, !!child_event->parent);
 
 	/*
 	 * It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	 * swapped under us.
 	 */
 	parent_ctx = perf_pin_task_context(parent, ctxn);
+	if (!parent_ctx)
+		return 0;
 
 	/*
 	 * No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = true;
 	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+	struct remove_event re = { .detach_group = false };
 	struct perf_event_context *ctx = __info;
-	struct perf_event *event;
 
 	perf_pmu_rotate_stop(ctx->pmu);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(event);
+	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+		__perf_remove_from_context(&re);
 	rcu_read_unlock();
 }
 
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
 	perf_event_exit_cpu_context(cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = false;
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
 }
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6b715c0af1b1..e0501fe7140d 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -990,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 	/* Remove an active timer from the queue: */
 	ret = remove_hrtimer(timer, base);
 
-	/* Switch the timer base, if necessary: */
-	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
 	if (mode & HRTIMER_MODE_REL) {
-		tim = ktime_add_safe(tim, new_base->get_time());
+		tim = ktime_add_safe(tim, base->get_time());
 		/*
 		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
 		 * to signal that they simply return xtime in
@@ -1009,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
 	hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
+	/* Switch the timer base, if necessary: */
+	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
 	timer_stats_hrtimer_set_start_info(timer);
 
 	leftmost = enqueue_hrtimer(timer, new_base);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b0e9467922e1..d24e4339b46d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
 
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 18fb7a2fb14b..1ea328aafdc9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 	return -ENOMEM;
 }
 
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
 {
 	unsigned int nr_pages, nr_highmem;
 
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a45b50962295..7228258b85ec 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
  *
  * See the vsnprintf() documentation for format string extensions over C99.
  */
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
 {
 	va_list args;
 	int r;
@@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
 	}
 }
 
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
 {
 	va_list ap;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..204d3d281809 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev);
-		if (likely(p && p != RETRY_TASK))
-			return p;
+		if (unlikely(p == RETRY_TASK))
+			goto again;
+
+		/* assumes fair_sched_class->next == idle_sched_class */
+		if (unlikely(!p))
+			p = idle_sched_class.pick_next_task(rq, prev);
+
+		return p;
 	}
 
 again:
@@ -2741,7 +2747,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2751,7 +2757,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2789,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2819,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * sys_sched_setattr - same as above, but with extended sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
 			       unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 		unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 					,
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+		.max_newidle_lb_cost	= 0,
+		.next_decay_max_lb_cost	= jiffies,
 	};
 	SD_INIT_NAME(sd, NUMA);
 	sd->private = &tl->data;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b9bb42b2d47..ab001b5d5048 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
  */
 void cpudl_cleanup(struct cpudl *cp)
 {
-	/*
-	 * nothing to do for the moment
-	 */
+	free_cpumask_var(cp->free_cpus);
 }
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..3031bac8aa3e 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
 
-	if (task_pri >= MAX_RT_PRIO)
-		return 0;
+	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a95097cb4591..72fdf06ef865 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -332,50 +332,50 @@ out:
  * softirq as those do not count in task exec_runtime any more.
  */
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-					 struct rq *rq)
+					 struct rq *rq, int ticks)
 {
-	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+	u64 cputime = (__force u64) cputime_one_jiffy;
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	if (steal_account_process_tick())
 		return;
 
+	cputime *= ticks;
+	scaled *= ticks;
+
 	if (irqtime_account_hi_update()) {
-		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+		cpustat[CPUTIME_IRQ] += cputime;
 	} else if (irqtime_account_si_update()) {
-		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+		cpustat[CPUTIME_SOFTIRQ] += cputime;
 	} else if (this_cpu_ksoftirqd() == p) {
 		/*
 		 * ksoftirqd time do not get accounted in cpu_softirq_time.
 		 * So, we have to handle it separately here.
 		 * Also, p->stime needs to be updated for ksoftirqd.
 		 */
-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					CPUTIME_SOFTIRQ);
+		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
 	} else if (user_tick) {
-		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime, scaled);
 	} else if (p == rq->idle) {
-		account_idle_time(cputime_one_jiffy);
+		account_idle_time(cputime);
 	} else if (p->flags & PF_VCPU) { /* System time or guest time */
-		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_guest_time(p, cputime, scaled);
 	} else {
-		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					CPUTIME_SYSTEM);
+		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
 	}
 }
 
 static void irqtime_account_idle_ticks(int ticks)
 {
-	int i;
 	struct rq *rq = this_rq();
 
-	for (i = 0; i < ticks; i++)
-		irqtime_account_process_tick(current, 0, rq);
+	irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 static inline void irqtime_account_idle_ticks(int ticks) {}
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-						struct rq *rq) {}
+						struct rq *rq, int nr_ticks) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 		return;
 
 	if (sched_clock_irqtime) {
-		irqtime_account_process_tick(p, user_tick, rq);
+		irqtime_account_process_tick(p, user_tick, rq, 1);
 		return;
 	}
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b08095786cb8..800e99b99075 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		sched_clock_tick();
 		update_rq_clock(rq);
 		dl_se->dl_throttled = 0;
+		dl_se->dl_yielded = 0;
 		if (p->on_rq) {
 			enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 			if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
 	 * We make the task go to sleep until its current deadline by
 	 * forcing its runtime to zero. This way, update_curr_dl() stops
 	 * it and the bandwidth timer will wake it up and will give it
-	 * new scheduling parameters (thanks to dl_new=1).
+	 * new scheduling parameters (thanks to dl_yielded=1).
 	 */
 	if (p->dl.runtime > 0) {
-		rq->curr->dl.dl_new = 1;
+		rq->curr->dl.dl_yielded = 1;
 		p->dl.runtime = 0;
 	}
 	update_curr_dl(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
 	int this_cpu = this_rq->cpu;
 
 	idle_enter_fair(this_rq);
+
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
 
 	raw_spin_lock(&this_rq->lock);
 
+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
+
 	/*
-	 * While browsing the domains, we released the rq lock.
-	 * A task could have be enqueued in the meantime
+	 * While browsing the domains, we released the rq lock, a task could
+	 * have been enqueued in the meantime. Since we're not going idle,
+	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task) {
+	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
-		goto out;
-	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
 		this_rq->next_balance = next_balance;
 	}
 
-	if (curr_cost > this_rq->max_idle_balance_cost)
-		this_rq->max_idle_balance_cost = curr_cost;
-
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 33e4648ae0e7..92f24f5e8d52 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
 	unsigned long old_flags = current->flags;
@@ -299,7 +299,7 @@ restart:
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
 {
 	__u32 pending;
 	unsigned long flags;
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index ac5b23cf7212..6620e5837ce2 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -188,7 +188,6 @@ static int tracepoint_add_func(struct tracepoint *tp,
 		WARN_ON_ONCE(1);
 		return PTR_ERR(old);
 	}
-	release_probes(old);
 
 	/*
 	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -200,6 +199,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
 	rcu_assign_pointer(tp->funcs, tp_funcs);
 	if (!static_key_enabled(&tp->key))
 		static_key_slow_inc(&tp->key);
+	release_probes(old);
 	return 0;
 }
 
@@ -221,7 +221,6 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 		WARN_ON_ONCE(1);
 		return PTR_ERR(old);
 	}
-	release_probes(old);
 
 	if (!tp_funcs) {
 		/* Removed last function */
@@ -232,6 +231,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
 		static_key_slow_dec(&tp->key);
 	}
 	rcu_assign_pointer(tp->funcs, tp_funcs);
+	release_probes(old);
 	return 0;
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0ee63af30bd1..8edc87185427 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1916,6 +1916,12 @@ static void send_mayday(struct work_struct *work)
 
 	/* mayday mayday mayday */
 	if (list_empty(&pwq->mayday_node)) {
+		/*
+		 * If @pwq is for an unbound wq, its base ref may be put at
+		 * any time due to an attribute change.  Pin @pwq until the
+		 * rescuer is done with it.
+		 */
+		get_pwq(pwq);
 		list_add_tail(&pwq->mayday_node, &wq->maydays);
 		wake_up_process(wq->rescuer->task);
 	}
@@ -2398,6 +2404,7 @@ static int rescuer_thread(void *__rescuer)
 	struct worker *rescuer = __rescuer;
 	struct workqueue_struct *wq = rescuer->rescue_wq;
 	struct list_head *scheduled = &rescuer->scheduled;
+	bool should_stop;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
 
@@ -2409,11 +2416,15 @@ static int rescuer_thread(void *__rescuer)
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	if (kthread_should_stop()) {
-		__set_current_state(TASK_RUNNING);
-		rescuer->task->flags &= ~PF_WQ_WORKER;
-		return 0;
-	}
+	/*
+	 * By the time the rescuer is requested to stop, the workqueue
+	 * shouldn't have any work pending, but @wq->maydays may still have
+	 * pwq(s) queued.  This can happen by non-rescuer workers consuming
+	 * all the work items before the rescuer got to them.  Go through
+	 * @wq->maydays processing before acting on should_stop so that the
+	 * list is always empty on exit.
+	 */
+	should_stop = kthread_should_stop();
 
 	/* see whether any pwq is asking for help */
 	spin_lock_irq(&wq_mayday_lock);
@@ -2445,6 +2456,12 @@ repeat:
 		process_scheduled_works(rescuer);
 
 		/*
+		 * Put the reference grabbed by send_mayday().  @pool won't
+		 * go away while we're holding its lock.
+		 */
+		put_pwq(pwq);
+
+		/*
 		 * Leave this pool.  If keep_working() is %true, notify a
 		 * regular worker; otherwise, we end up with 0 concurrency
 		 * and stalling the execution.
@@ -2459,6 +2476,12 @@ repeat:
 
 	spin_unlock_irq(&wq_mayday_lock);
 
+	if (should_stop) {
+		__set_current_state(TASK_RUNNING);
+		rescuer->task->flags &= ~PF_WQ_WORKER;
+		return 0;
+	}
+
 	/* rescuers should never participate in concurrency management */
 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
 	schedule();
@@ -4100,7 +4123,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
 	if (!pwq) {
 		pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
 			   wq->name);
-		goto out_unlock;
+		mutex_lock(&wq->mutex);
+		goto use_dfl_pwq;
 	}
 
 	/*