Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	11
-rw-r--r--	kernel/futex.c	53
-rw-r--r--	kernel/sched/clock.c	4
-rw-r--r--	kernel/sched/core.c	9
-rw-r--r--	kernel/stop_machine.c	2
-rw-r--r--	kernel/trace/trace_events.c	6
-rw-r--r--	kernel/trace/trace_export.c	7
7 files changed, 64 insertions(+), 28 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 105f273b6f86..0c753ddd223b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 	err = percpu_ref_init(&css->refcnt, css_release);
 	if (err)
-		goto err_free;
+		goto err_free_css;
 
 	init_css(css, ss, cgrp);
 
 	err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
 	if (err)
-		goto err_free;
+		goto err_free_percpu_ref;
 
 	err = online_css(css);
 	if (err)
-		goto err_free;
+		goto err_clear_dir;
 
 	dget(cgrp->dentry);
 	css_get(css->parent);
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 	return 0;
 
-err_free:
+err_clear_dir:
+	cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
 	percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
 	ss->css_free(css);
 	return err;
 }
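
The relabelled exit path turns create_css() into the usual kernel unwind ladder: each label undoes exactly the steps that succeeded before the failure, in reverse order. A minimal sketch of the same idiom with invented names (not from this patch):

static int setup_widget(struct widget *w)
{
	int err;

	err = alloc_resource_a(w);		/* step 1 */
	if (err)
		goto err_out;			/* nothing to undo yet */

	err = alloc_resource_b(w);		/* step 2 */
	if (err)
		goto err_free_a;		/* undo step 1 only */

	err = publish_widget(w);		/* step 3 */
	if (err)
		goto err_free_b;		/* undo step 2, then step 1 */

	return 0;

err_free_b:
	free_resource_b(w);
err_free_a:
	free_resource_a(w);
err_out:
	return err;
}

Falling through the labels in order is what makes the single-label "err_free:" version buggy: it always ran the full cleanup even when only part of the setup had happened.
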
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
  * waiting on a futex.
  */
 struct futex_hash_bucket {
+	atomic_t waiters;
 	spinlock_t lock;
 	struct plist_head chain;
 } ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
 	smp_mb__after_atomic_inc();
 }
 
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
 {
 #ifdef CONFIG_SMP
+	atomic_inc(&hb->waiters);
 	/*
-	 * Tasks trying to enter the critical region are most likely
-	 * potential waiters that will be added to the plist. Ensure
-	 * that wakers won't miss to-be-slept tasks in the window between
-	 * the wait call and the actual plist_add.
+	 * Full barrier (A), see the ordering comment above.
 	 */
-	if (spin_is_locked(&hb->lock))
-		return true;
-	smp_rmb(); /* Make sure we check the lock state first */
+	smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	atomic_dec(&hb->waiters);
+#endif
+}
 
-	return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+	return atomic_read(&hb->waiters);
 #else
-	return true;
+	return 1;
 #endif
 }
 
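
The point of the explicit per-bucket counter is that the wake side can cheaply test for pending waiters before touching the bucket lock at all. A rough, hypothetical sketch of the shape of such a wake-side fast path (it is deliberately not the real futex_wake(); key handling, the plist walk and error paths are omitted):

/* Sketch only: how a waker can use hb_waiters_pending(). */
static int wake_side_sketch(union futex_key *key)
{
	struct futex_hash_bucket *hb = hash_futex(key);

	/*
	 * hb_waiters_inc() issues a full barrier after the increment and
	 * the waiter only reads the futex word after that, so a waker
	 * that stores the new futex value and then sees waiters == 0
	 * here cannot be racing with a waiter that will sleep on the
	 * stale value.
	 */
	if (!hb_waiters_pending(hb))
		return 0;		/* empty bucket: skip the lock and the walk */

	spin_lock(&hb->lock);
	/* ... walk hb->chain and wake the futex_q entries matching key ... */
	spin_unlock(&hb->lock);
	return 1;
}
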
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
 
 	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
 	plist_del(&q->list, &hb->chain);
+	hb_waiters_dec(hb);
 }
 
 /*
959/* 976/*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 	 */
 	if (likely(&hb1->chain != &hb2->chain)) {
 		plist_del(&q->list, &hb1->chain);
+		hb_waiters_dec(hb1);
 		plist_add(&q->list, &hb2->chain);
+		hb_waiters_inc(hb2);
 		q->lock_ptr = &hb2->lock;
 	}
 	get_futex_key_refs(key2);
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	struct futex_hash_bucket *hb;
 
 	hb = hash_futex(&q->key);
+
+	/*
+	 * Increment the counter before taking the lock so that
+	 * a potential waker won't miss a to-be-slept task that is
+	 * waiting for the spinlock. This is safe as all queue_lock()
+	 * users end up calling queue_me(). Similarly, for housekeeping,
+	 * decrement the counter at queue_unlock() when some error has
+	 * occurred and we don't end up adding the task to the list.
+	 */
+	hb_waiters_inc(hb);
+
 	q->lock_ptr = &hb->lock;
 
 	spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
+	hb_waiters_dec(hb);
 }
 
 /**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 	 * Unqueue the futex_q and determine which it was.
 	 */
 	plist_del(&q->list, &hb->chain);
+	hb_waiters_dec(hb);
 
 	/* Handle spurious wakeups gracefully */
 	ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
 	futex_cmpxchg_enabled = 1;
 
 	for (i = 0; i < futex_hashsize; i++) {
+		atomic_set(&futex_queues[i].waiters, 0);
 		plist_head_init(&futex_queues[i].chain);
 		spin_lock_init(&futex_queues[i].lock);
 	}
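
On the wait side the counter goes up in queue_lock(), before the bucket lock is even taken, and comes down on every path that removes the task from (or never adds it to) the plist: queue_unlock(), __unqueue_futex(), requeue_futex() and the early requeue-PI wakeup above. A condensed, hypothetical sketch of that pairing (the called helpers are real futex.c functions, but this is not the actual futex_wait_setup(); key management and most error handling are omitted):

/* Wait-side pairing sketch: where hb->waiters goes up and down. */
static int wait_side_sketch(u32 __user *uaddr, u32 val, struct futex_q *q)
{
	struct futex_hash_bucket *hb;
	u32 uval;
	int ret;

	hb = queue_lock(q);		/* hb_waiters_inc(hb); then spin_lock(&hb->lock) */

	ret = get_futex_value_locked(&uval, uaddr);
	if (ret || uval != val) {
		queue_unlock(hb);	/* never queued: spin_unlock(); hb_waiters_dec(hb) */
		return ret ? ret : -EWOULDBLOCK;
	}

	queue_me(q, hb);		/* plist_add(); the counter stays elevated while queued */
	/* ... sleep; a later __unqueue_futex() does plist_del(); hb_waiters_dec(hb) ... */
	return 0;
}

The invariant is simply that waiters counts tasks that are either on the plist or committed to getting there, so a waker that reads zero can safely skip the bucket.
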
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc35761..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
 	if (unlikely(!sched_clock_running))
 		return 0ull;
 
-	preempt_disable();
+	preempt_disable_notrace();
 	scd = cpu_sdc(cpu);
 
 	if (cpu != smp_processor_id())
 		clock = sched_clock_remote(scd);
 	else
 		clock = sched_clock_local(scd);
-	preempt_enable();
+	preempt_enable_notrace();
 
 	return clock;
 }
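
The _notrace variants matter because sched_clock_cpu() is read by the tracing machinery itself for timestamps; the plain preempt_disable()/preempt_enable() pair can invoke the preempt on/off tracing hooks (for example with CONFIG_DEBUG_PREEMPT or the preempt-off tracer), which would recurse back into the clock. A hedged sketch of the same pattern for any helper a tracer might call (the function and per-CPU variable names are invented):

#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/compiler.h>

static DEFINE_PER_CPU(u64, sample_counter);	/* illustrative per-CPU data */

/*
 * Hypothetical helper that may be called from tracing code.  It must
 * not generate trace events itself, so it is marked notrace and uses
 * the _notrace preempt helpers, which skip the instrumentation the
 * plain versions can trigger.
 */
static notrace u64 tracer_safe_read(void)
{
	u64 val;

	preempt_disable_notrace();	/* no preempt-off instrumentation here */
	val = this_cpu_read(sample_counter);
	preempt_enable_notrace();	/* and none on the way out either */

	return val;
}
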
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6edbef296ece..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3338,6 +3338,15 @@ recheck:
 				return -EPERM;
 		}
 
+		/*
+		 * Can't set/change SCHED_DEADLINE policy at all for now
+		 * (safest behavior); in the future we would like to allow
+		 * unprivileged DL tasks to increase their relative deadline
+		 * or reduce their runtime (both ways reducing utilization)
+		 */
+		if (dl_policy(policy))
+			return -EPERM;
+
 		/*
 		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
 		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
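
For userspace this means an unprivileged caller (one without CAP_SYS_NICE) asking for SCHED_DEADLINE now gets -EPERM from the policy-setting syscalls. A hedged sketch using the raw sched_setattr() syscall; the struct is written out by hand because glibc ships no wrapper, and SYS_sched_setattr / SCHED_DEADLINE need reasonably new headers, so treat the constants as illustrative:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6		/* value used by the kernel's sched.h */
#endif

/* Hand-rolled copy of the kernel's extended scheduling parameter block. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;	/* 10 ms every ... */
	attr.sched_deadline = 30 * 1000 * 1000;	/* ... 30 ms, due by 30 ms */
	attr.sched_period   = 30 * 1000 * 1000;

	/* pid 0 == current task, flags == 0 */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) == -1)
		perror("sched_setattr");	/* expect EPERM without CAP_SYS_NICE */

	return 0;
}
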
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84571e09c907..01fbae5b97b7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 	 */
 	smp_call_function_single(min(cpu1, cpu2),
 				 &irq_cpu_stop_queue_work,
-				 &call_args, 0);
+				 &call_args, 1);
 	lg_local_unlock(&stop_cpus_lock);
 	preempt_enable();
 
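
The last argument to smp_call_function_single() is the wait flag: passing 1 blocks the caller until the IPI handler has finished on the remote CPU. That matters here because call_args lives on the caller's stack, so returning before irq_cpu_stop_queue_work() has run would let the remote CPU dereference a dead stack frame. A generic hedged sketch of the rule (everything except smp_call_function_single() is made up):

#include <linux/kernel.h>
#include <linux/smp.h>

struct remote_args {				/* illustrative argument block */
	int value;
};

static void remote_handler(void *info)
{
	struct remote_args *args = info;

	pr_info("remote value %d\n", args->value);
}

static void run_on_cpu(int cpu)
{
	struct remote_args args = { .value = 42 };	/* stack memory! */

	/*
	 * wait == 1: smp_call_function_single() only returns once
	 * remote_handler() has completed, so 'args' still exists while
	 * the remote CPU uses it.  With wait == 0 this function could
	 * return (and the frame unwind) before the handler ever ran.
	 */
	smp_call_function_single(cpu, remote_handler, &args, 1);
}
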
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f3989ceb5cd5..7b16d40bd64d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
 
 DEFINE_MUTEX(event_mutex);
 
-DEFINE_MUTEX(event_storage_mutex);
-EXPORT_SYMBOL_GPL(event_storage_mutex);
-
-char event_storage[EVENT_STORAGE_SIZE];
-EXPORT_SYMBOL_GPL(event_storage);
-
 LIST_HEAD(ftrace_events);
 static LIST_HEAD(ftrace_common_fields);
 
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7c3e3e72e2b6..ee0a5098ac43 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
 #undef __array
 #define __array(type, item, len) \
 	do {							\
+		char *type_str = #type"["__stringify(len)"]";	\
 		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);		\
-		mutex_lock(&event_storage_mutex);		\
-		snprintf(event_storage, sizeof(event_storage),	\
-			 "%s[%d]", #type, len);			\
-		ret = trace_define_field(event_call, event_storage, #item, \
+		ret = trace_define_field(event_call, type_str, #item, \
 			 offsetof(typeof(field), item),		\
 			 sizeof(field.item),			\
 			 is_signed_type(type), filter_type);	\
-		mutex_unlock(&event_storage_mutex);		\
 		if (ret)					\
 			return ret;				\
 	} while (0);
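
The rewrite replaces a runtime snprintf() into the shared event_storage buffer (which is why event_storage_mutex existed) with a string literal assembled by the preprocessor, letting the global buffer and its lock be deleted above. A small hedged sketch of what #type and __stringify() produce, as a user-space re-implementation of the two-step trick from include/linux/stringify.h:

#include <stdio.h>

/* Same two-step expansion as include/linux/stringify.h: the outer macro
 * lets the argument expand before it is stringized. */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define TYPE_STR(type, len)	#type "[" __stringify(len) "]"

#define BUF_LEN 32

int main(void)
{
	/*
	 * Adjacent string literals are concatenated at compile time:
	 * "char" "[" "32" "]"  ->  "char[32]".
	 * No shared buffer, no locking, no runtime formatting.
	 */
	const char *type_str = TYPE_STR(char, BUF_LEN);

	printf("%s\n", type_str);	/* prints: char[32] */
	return 0;
}
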