path: root/kernel/sched
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 10:17:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 10:17:17 -0500
commit	3478588b5136966c80c571cf0006f08e9e5b8f04 (patch)
tree	186e67cce850bbdaf91aa678406d68fb87c5597c /kernel/sched
parent	c8f5ed6ef972ed4fd10b0c2e2baec3b6803d3c73 (diff)
parent	28d49e282665e2a51cc91b716937fccfa24d80e1 (diff)
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The biggest part of this tree is the new auto-generated atomics API
  wrappers by Mark Rutland.

  The primary motivation was to allow instrumentation without uglifying
  the primary source code.

  The linecount increase comes from adding the auto-generated files to
  the Git space as well:

      include/asm-generic/atomic-instrumented.h | 1689 ++++++++++++++++--
      include/asm-generic/atomic-long.h         | 1174 ++++++++++---
      include/linux/atomic-fallback.h           | 2295 +++++++++++++++++++++++++
      include/linux/atomic.h                    | 1241 +------------

  I preferred this approach, so that the full call stack of the (already
  complex) locking APIs is still fully visible in 'git grep'. But if this
  is excessive we could certainly hide them.

  There's a separate build-time mechanism to determine whether the
  headers are out of date (they should never be stale if we do our job
  right).

  Anyway, nothing from this should be visible to regular kernel
  developers.

  Other changes:

   - Add support for dynamic keys, which removes a source of false
     positives in the workqueue code, among other things (Bart Van
     Assche)

   - Updates to tools/memory-model (Andrea Parri, Paul E. McKenney)

   - qspinlock, wake_q and lockdep micro-optimizations (Waiman Long)

   - misc other updates and enhancements"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  locking/lockdep: Shrink struct lock_class_key
  locking/lockdep: Add module_param to enable consistency checks
  lockdep/lib/tests: Test dynamic key registration
  lockdep/lib/tests: Fix run_tests.sh
  kernel/workqueue: Use dynamic lockdep keys for workqueues
  locking/lockdep: Add support for dynamic keys
  locking/lockdep: Verify whether lock objects are small enough to be used as class keys
  locking/lockdep: Check data structure consistency
  locking/lockdep: Reuse lock chains that have been freed
  locking/lockdep: Fix a comment in add_chain_cache()
  locking/lockdep: Introduce lockdep_next_lockchain() and lock_chain_count()
  locking/lockdep: Reuse list entries that are no longer in use
  locking/lockdep: Free lock classes that are no longer in use
  locking/lockdep: Update two outdated comments
  locking/lockdep: Make it easy to detect whether or not inside a selftest
  locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock()
  locking/lockdep: Initialize the locks_before and locks_after lists earlier
  locking/lockdep: Make zap_class() remove all matching lock order entries
  locking/lockdep: Reorder struct lock_class members
  locking/lockdep: Avoid that add_chain_cache() adds an invalid chain to the cache
  ...
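To make the commit message concrete: below is a minimal, hypothetical userspace sketch of the wrapper pattern the auto-generated headers follow, in which the instrumented entry point runs a sanitizer check and then delegates to an arch_-prefixed primitive. Every name in the sketch (my_atomic_t, my_atomic_add_return, the stubbed kasan_check_write) is invented for illustration; the real generated code in include/asm-generic/atomic-instrumented.h is far more extensive.

    /*
     * Minimal userspace sketch (NOT the kernel's generated code) of the
     * instrumented-wrapper pattern: check the accessed memory first,
     * then defer to the underlying arch_ primitive.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int counter; } my_atomic_t;

    /* Stand-in for an arch-level primitive such as arch_atomic_add_return(). */
    static inline int arch_atomic_add_return(int i, my_atomic_t *v)
    {
            return atomic_fetch_add(&v->counter, i) + i;
    }

    /* Stand-in for the sanitizer hook the generated wrappers invoke. */
    static inline void kasan_check_write(const volatile void *p, unsigned int size)
    {
            (void)p; (void)size; /* a real kernel build validates the access here */
    }

    /* The generated-wrapper shape: instrument, then delegate. */
    static inline int my_atomic_add_return(int i, my_atomic_t *v)
    {
            kasan_check_write(v, sizeof(*v));
            return arch_atomic_add_return(i, v);
    }

    int main(void)
    {
            my_atomic_t v = { .counter = 40 };
            printf("%d\n", my_atomic_add_return(2, &v)); /* prints 42 */
            return 0;
    }

The design choice the message defends (committing generated headers rather than producing them at build time) trades repository size for grep-ability of the locking call stack.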
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	67
1 file changed, 46 insertions, 21 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 43f44539b88f..0002995570db 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -396,19 +396,7 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
-/**
- * wake_q_add() - queue a wakeup for 'later' waking.
- * @head: the wake_q_head to add @task to
- * @task: the task to queue for 'later' wakeup
- *
- * Queue a task for later wakeup, most likely by the wake_up_q() call in the
- * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
- * instantly.
- *
- * This function must be used as-if it were wake_up_process(); IOW the task
- * must be ready to be woken at this location.
- */
-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
 {
 	struct wake_q_node *node = &task->wake_q;
 
@@ -421,16 +409,56 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 	 * state, even in the failed case, an explicit smp_mb() must be used.
 	 */
 	smp_mb__before_atomic();
-	if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
-		return;
-
-	get_task_struct(task);
+	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
+		return false;
 
 	/*
 	 * The head is context local, there can be no concurrency.
 	 */
 	*head->lastp = node;
 	head->lastp = &node->next;
+	return true;
+}
+
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+	if (__wake_q_add(head, task))
+		get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+	if (!__wake_q_add(head, task))
+		put_task_struct(task);
 }
 
 void wake_up_q(struct wake_q_head *head)
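One point worth making explicit between the two hunks of this diff: the split moves the reference counting out of the queueing logic, so wake_q_add() takes its own reference (get_task_struct()) only when the task was actually queued, while the new wake_q_add_safe() consumes a reference the caller already holds, dropping it (put_task_struct()) when the task turned out to be queued already. A hedged sketch of a hypothetical caller follows; example_requeue() is invented here, not part of the patch.

    /* Hypothetical caller sketch -- not from this patch. */
    static void example_requeue(struct wake_q_head *wake_q, struct task_struct *p)
    {
            /*
             * Assume the caller already did get_task_struct(p), e.g. while
             * unlinking p from a wait queue. Handing that reference to
             * wake_q_add_safe() means no put_task_struct() is needed here:
             * wake_q either holds the reference until wake_up_q(), or drops
             * it immediately if p was already queued by someone else.
             */
            wake_q_add_safe(wake_q, p);
    }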
@@ -5866,14 +5894,11 @@ void __init sched_init_smp(void)
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
 	 * CPU masks are stable and all blatant races in the below code cannot
-	 * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
-	 * but there won't be any contention on it.
+	 * happen.
 	 */
-	cpus_read_lock();
 	mutex_lock(&sched_domains_mutex);
 	sched_init_domains(cpu_active_mask);
 	mutex_unlock(&sched_domains_mutex);
-	cpus_read_unlock();
 
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)