author     Ingo Molnar <mingo@kernel.org>  2017-03-16 04:50:50 -0400
committer  Ingo Molnar <mingo@kernel.org>  2017-03-16 04:50:50 -0400
commit     2b95bd7d58d368fe5dcbe6f4e494847ea082d89d (patch)
tree       48c38d792f31f2fe10b26b03bdf1b10a82ca03a4 /kernel
parent     ffa86c2f1a8862cf58c873f6f14d4b2c3250fb48 (diff)
parent     69eea5a4ab9c705496e912b55a9d312325de19e6 (diff)
Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/hashtab.c              119
-rw-r--r--  kernel/bpf/lpm_trie.c               6
-rw-r--r--  kernel/cgroup/cgroup-v1.c           2
-rw-r--r--  kernel/cgroup/cgroup.c              2
-rw-r--r--  kernel/cgroup/pids.c                2
-rw-r--r--  kernel/events/core.c                2
-rw-r--r--  kernel/exit.c                       1
-rw-r--r--  kernel/kexec_file.c                 8
-rw-r--r--  kernel/kexec_internal.h             6
-rw-r--r--  kernel/locking/lockdep.c           11
-rw-r--r--  kernel/locking/test-ww_mutex.c      6
-rw-r--r--  kernel/sched/core.c                11
-rw-r--r--  kernel/sched/cpufreq_schedutil.c   19
-rw-r--r--  kernel/sched/fair.c                 2
-rw-r--r--  kernel/sched/features.h             5
-rw-r--r--  kernel/sched/wait.c                39
-rw-r--r--  kernel/time/jiffies.c               2
-rw-r--r--  kernel/trace/ftrace.c              23
-rw-r--r--  kernel/trace/trace_stack.c          2
-rw-r--r--  kernel/ucount.c                    18
-rw-r--r--  kernel/workqueue.c                  1
21 files changed, 201 insertions(+), 86 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ea87fb19a94..afe5bab376c9 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,11 +13,12 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
+#include <linux/rculist_nulls.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 
 struct bucket {
-	struct hlist_head head;
+	struct hlist_nulls_head head;
 	raw_spinlock_t lock;
 };
 
@@ -44,9 +45,14 @@ enum extra_elem_state {
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
 	union {
-		struct hlist_node hash_node;
-		struct bpf_htab *htab;
-		struct pcpu_freelist_node fnode;
+		struct hlist_nulls_node hash_node;
+		struct {
+			void *padding;
+			union {
+				struct bpf_htab *htab;
+				struct pcpu_freelist_node fnode;
+			};
+		};
 	};
 	union {
 		struct rcu_head rcu;
@@ -162,7 +168,8 @@ skip_percpu_elems:
 				    offsetof(struct htab_elem, lru_node),
 				    htab->elem_size, htab->map.max_entries);
 	else
-		pcpu_freelist_populate(&htab->freelist, htab->elems,
+		pcpu_freelist_populate(&htab->freelist,
+				       htab->elems + offsetof(struct htab_elem, fnode),
 				       htab->elem_size, htab->map.max_entries);
 
 	return 0;
@@ -217,6 +224,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	int err, i;
 	u64 cost;
 
+	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+		     offsetof(struct htab_elem, hash_node.pprev));
+	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+		     offsetof(struct htab_elem, hash_node.pprev));
+
 	if (lru && !capable(CAP_SYS_ADMIN))
 		/* LRU implementation is much complicated than other
 		 * maps. Hence, limit to CAP_SYS_ADMIN for now.
@@ -326,7 +338,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		goto free_htab;
 
 	for (i = 0; i < htab->n_buckets; i++) {
-		INIT_HLIST_HEAD(&htab->buckets[i].head);
+		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 		raw_spin_lock_init(&htab->buckets[i].lock);
 	}
 
@@ -366,20 +378,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
 	return &htab->buckets[hash & (htab->n_buckets - 1)];
 }
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
 {
 	return &__select_bucket(htab, hash)->head;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+/* this lookup function can only be called with bucket lock taken */
+static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
 					 void *key, u32 key_size)
 {
+	struct hlist_nulls_node *n;
+	struct htab_elem *l;
+
+	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
+		if (l->hash == hash && !memcmp(&l->key, key, key_size))
+			return l;
+
+	return NULL;
+}
+
+/* can be called without bucket lock. it will repeat the loop in
+ * the unlikely event when elements moved from one bucket into another
+ * while link list is being walked
+ */
+static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
+					       u32 hash, void *key,
+					       u32 key_size, u32 n_buckets)
+{
+	struct hlist_nulls_node *n;
 	struct htab_elem *l;
 
-	hlist_for_each_entry_rcu(l, head, hash_node)
+again:
+	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 		if (l->hash == hash && !memcmp(&l->key, key, key_size))
 			return l;
 
+	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
+		goto again;
+
 	return NULL;
 }
 
@@ -387,7 +423,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
 static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct htab_elem *l;
 	u32 hash, key_size;
 
@@ -400,7 +436,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
 	head = select_bucket(htab, hash);
 
-	l = lookup_elem_raw(head, hash, key, key_size);
+	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 
 	return l;
 }
@@ -433,8 +469,9 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 {
 	struct bpf_htab *htab = (struct bpf_htab *)arg;
-	struct htab_elem *l, *tgt_l;
-	struct hlist_head *head;
+	struct htab_elem *l = NULL, *tgt_l;
+	struct hlist_nulls_head *head;
+	struct hlist_nulls_node *n;
 	unsigned long flags;
 	struct bucket *b;
 
@@ -444,9 +481,9 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 
 	raw_spin_lock_irqsave(&b->lock, flags);
 
-	hlist_for_each_entry_rcu(l, head, hash_node)
+	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 		if (l == tgt_l) {
-			hlist_del_rcu(&l->hash_node);
+			hlist_nulls_del_rcu(&l->hash_node);
 			break;
 		}
 
@@ -459,7 +496,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct htab_elem *l, *next_l;
 	u32 hash, key_size;
 	int i;
@@ -473,7 +510,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	head = select_bucket(htab, hash);
 
 	/* lookup the key */
-	l = lookup_elem_raw(head, hash, key, key_size);
+	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
 
 	if (!l) {
 		i = 0;
@@ -481,7 +518,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	}
 
 	/* key was found, get next key in the same bucket */
-	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
 				  struct htab_elem, hash_node);
 
 	if (next_l) {
@@ -500,7 +537,7 @@ find_first_elem:
 		head = select_bucket(htab, i);
 
 		/* pick first element in the bucket */
-		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
 					  struct htab_elem, hash_node);
 		if (next_l) {
 			/* if it's not empty, just return it */
@@ -582,9 +619,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 	int err = 0;
 
 	if (prealloc) {
-		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
-		if (!l_new)
+		struct pcpu_freelist_node *l;
+
+		l = pcpu_freelist_pop(&htab->freelist);
+		if (!l)
 			err = -E2BIG;
+		else
+			l_new = container_of(l, struct htab_elem, fnode);
 	} else {
 		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
 			atomic_dec(&htab->count);
@@ -661,7 +702,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l_new = NULL, *l_old;
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
 	u32 key_size, hash;
@@ -700,9 +741,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	/* add new element to the head of the list, so that
 	 * concurrent search will find it before old elem
 	 */
-	hlist_add_head_rcu(&l_new->hash_node, head);
+	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 	if (l_old) {
-		hlist_del_rcu(&l_old->hash_node);
+		hlist_nulls_del_rcu(&l_old->hash_node);
 		free_htab_elem(htab, l_old);
 	}
 	ret = 0;
@@ -716,7 +757,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l_new, *l_old = NULL;
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
 	u32 key_size, hash;
@@ -757,10 +798,10 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 	/* add new element to the head of the list, so that
 	 * concurrent search will find it before old elem
 	 */
-	hlist_add_head_rcu(&l_new->hash_node, head);
+	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 	if (l_old) {
 		bpf_lru_node_set_ref(&l_new->lru_node);
-		hlist_del_rcu(&l_old->hash_node);
+		hlist_nulls_del_rcu(&l_old->hash_node);
 	}
 	ret = 0;
 
@@ -781,7 +822,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l_new = NULL, *l_old;
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
 	u32 key_size, hash;
@@ -820,7 +861,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 			ret = PTR_ERR(l_new);
 			goto err;
 		}
-		hlist_add_head_rcu(&l_new->hash_node, head);
+		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 	}
 	ret = 0;
 err:
@@ -834,7 +875,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l_new = NULL, *l_old;
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
 	u32 key_size, hash;
@@ -882,7 +923,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	} else {
 		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
 				value, onallcpus);
-		hlist_add_head_rcu(&l_new->hash_node, head);
+		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 		l_new = NULL;
 	}
 	ret = 0;
@@ -910,7 +951,7 @@ static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct bucket *b;
 	struct htab_elem *l;
 	unsigned long flags;
@@ -930,7 +971,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 	l = lookup_elem_raw(head, hash, key, key_size);
 
 	if (l) {
-		hlist_del_rcu(&l->hash_node);
+		hlist_nulls_del_rcu(&l->hash_node);
 		free_htab_elem(htab, l);
 		ret = 0;
 	}
@@ -942,7 +983,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct hlist_head *head;
+	struct hlist_nulls_head *head;
 	struct bucket *b;
 	struct htab_elem *l;
 	unsigned long flags;
@@ -962,7 +1003,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	l = lookup_elem_raw(head, hash, key, key_size);
 
 	if (l) {
-		hlist_del_rcu(&l->hash_node);
+		hlist_nulls_del_rcu(&l->hash_node);
 		ret = 0;
 	}
 
@@ -977,12 +1018,12 @@ static void delete_all_elements(struct bpf_htab *htab)
 	int i;
 
 	for (i = 0; i < htab->n_buckets; i++) {
-		struct hlist_head *head = select_bucket(htab, i);
-		struct hlist_node *n;
+		struct hlist_nulls_head *head = select_bucket(htab, i);
+		struct hlist_nulls_node *n;
 		struct htab_elem *l;
 
-		hlist_for_each_entry_safe(l, n, head, hash_node) {
-			hlist_del_rcu(&l->hash_node);
+		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+			hlist_nulls_del_rcu(&l->hash_node);
 			if (l->state != HTAB_EXTRA_ELEM_USED)
 				htab_elem_free(htab, l);
 		}
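
As a reading aid for the hashtab.c conversion above: the point of the hlist_nulls lists is that each bucket's list terminator encodes the bucket index, so a lockless RCU reader that gets migrated onto another chain (because an element it was traversing was freed and reused in a different bucket) notices the mismatch via get_nulls_value() and restarts, exactly as lookup_nulls_elem_raw() does. Below is a minimal hedged sketch of that pattern; the table size, element type and function names are illustrative assumptions, not code from this patch.

/* Hedged sketch of the nulls-list lookup pattern adopted above.
 * DEMO_BUCKETS, struct demo_elem and demo_lookup() are made up;
 * the list primitives are the real <linux/rculist_nulls.h> API.
 */
#include <linux/types.h>
#include <linux/rculist_nulls.h>

#define DEMO_BUCKETS 4				/* power of two, like htab->n_buckets */

struct demo_elem {
	u32 key;
	struct hlist_nulls_node node;
};

static struct hlist_nulls_head demo_tbl[DEMO_BUCKETS];

static void demo_init(void)
{
	int i;

	/* INIT_HLIST_NULLS_HEAD(head, i) stores i in the list terminator */
	for (i = 0; i < DEMO_BUCKETS; i++)
		INIT_HLIST_NULLS_HEAD(&demo_tbl[i], i);
}

/* caller is assumed to hold rcu_read_lock() */
static struct demo_elem *demo_lookup(u32 key)
{
	u32 b = key & (DEMO_BUCKETS - 1);
	struct hlist_nulls_node *n;
	struct demo_elem *e;

again:
	hlist_nulls_for_each_entry_rcu(e, n, &demo_tbl[b], node)
		if (e->key == key)
			return e;
	/* ended on another bucket's terminator: element reuse moved us, retry */
	if (get_nulls_value(n) != b)
		goto again;
	return NULL;
}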
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 8bfe0afaee10..b37bd9ab7f57 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -500,9 +500,15 @@ unlock:
 	raw_spin_unlock(&trie->lock);
 }
 
+static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
+{
+	return -ENOTSUPP;
+}
+
 static const struct bpf_map_ops trie_ops = {
 	.map_alloc = trie_alloc,
 	.map_free = trie_free,
+	.map_get_next_key = trie_get_next_key,
 	.map_lookup_elem = trie_lookup_elem,
 	.map_update_elem = trie_update_elem,
 	.map_delete_elem = trie_delete_elem,
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 56eba9caa632..1dc22f6b49f5 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1329,7 +1329,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
 	struct task_struct *task;
 	int count = 0;
 
-	seq_printf(seq, "css_set %p\n", cset);
+	seq_printf(seq, "css_set %pK\n", cset);
 
 	list_for_each_entry(task, &cset->tasks, cg_list) {
 		if (count++ > MAX_TASKS_SHOWN_PER_CSS)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 0125589c7428..48851327a15e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2669,7 +2669,7 @@ static bool css_visible(struct cgroup_subsys_state *css)
  *
  * Returns 0 on success, -errno on failure. On failure, csses which have
  * been processed already aren't cleaned up. The caller is responsible for
- * cleaning up with cgroup_apply_control_disble().
+ * cleaning up with cgroup_apply_control_disable().
  */
 static int cgroup_apply_control_enable(struct cgroup *cgrp)
 {
diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
index e756dae49300..2237201d66d5 100644
--- a/kernel/cgroup/pids.c
+++ b/kernel/cgroup/pids.c
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
 		/* Only log the first time events_limit is incremented. */
 		if (atomic64_inc_return(&pids->events_limit) == 1) {
 			pr_info("cgroup: fork rejected by pids controller in ");
-			pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+			pr_cont_cgroup_path(css->cgroup);
 			pr_cont("\n");
 		}
 		cgroup_file_notify(&pids->events_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 16c877a121c8..2d7990d4e988 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1001,7 +1001,7 @@ list_update_cgroup_event(struct perf_event *event,
  */
 #define PERF_CPU_HRTIMER (1000 / HZ)
 /*
- * function must be called with interrupts disbled
+ * function must be called with interrupts disabled
  */
 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
diff --git a/kernel/exit.c b/kernel/exit.c
index e126ebf2400c..516acdb0e0ec 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -554,7 +554,6 @@ static void exit_mm(void)
 	enter_lazy_tlb(mm, current);
 	task_unlock(current);
 	mm_update_next_owner(mm);
-	userfaultfd_exit(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
 		exit_oom_victim();
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index b56a558e406d..b118735fea9d 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -614,13 +614,13 @@ static int kexec_calculate_store_digests(struct kimage *image)
 		ret = crypto_shash_final(desc, digest);
 		if (ret)
 			goto out_free_digest;
-		ret = kexec_purgatory_get_set_symbol(image, "sha_regions",
+		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
 						sha_regions, sha_region_sz, 0);
 		if (ret)
 			goto out_free_digest;
 
-		ret = kexec_purgatory_get_set_symbol(image, "sha256_digest",
+		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
 						digest, SHA256_DIGEST_SIZE, 0);
 		if (ret)
 			goto out_free_digest;
 	}
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 4cef7e4706b0..799a8a452187 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -15,11 +15,7 @@ int kimage_is_destination_range(struct kimage *image,
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
-struct kexec_sha_region {
-	unsigned long start;
-	unsigned long len;
-};
-
+#include <linux/purgatory.h>
 void kimage_file_post_load_cleanup(struct kimage *image);
 #else /* CONFIG_KEXEC_FILE */
 static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 12e38c213b70..a95e5d1f4a9c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3262,10 +3262,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (depth) {
 		hlock = curr->held_locks + depth - 1;
 		if (hlock->class_idx == class_idx && nest_lock) {
-			if (hlock->references)
+			if (hlock->references) {
+				/*
+				 * Check: unsigned int references:12, overflow.
+				 */
+				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+					return 0;
+
 				hlock->references++;
-			else
+			} else {
 				hlock->references = 2;
+			}
 
 			return 1;
 		}
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index da6c9a34f62f..6b7abb334ca6 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -50,7 +50,7 @@ static void test_mutex_work(struct work_struct *work)
 
 	if (mtx->flags & TEST_MTX_TRY) {
 		while (!ww_mutex_trylock(&mtx->mutex))
-			cpu_relax();
+			cond_resched();
 	} else {
 		ww_mutex_lock(&mtx->mutex, NULL);
 	}
@@ -88,7 +88,7 @@ static int __test_mutex(unsigned int flags)
 				ret = -EINVAL;
 				break;
 			}
-			cpu_relax();
+			cond_resched();
 		} while (time_before(jiffies, timeout));
 	} else {
 		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
@@ -627,7 +627,7 @@ static int __init test_ww_mutex_init(void)
 	if (ret)
 		return ret;
 
-	ret = stress(4096, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
+	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, 1<<12, STRESS_ALL);
 	if (ret)
 		return ret;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 956383844116..3b31fc05a0f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those loose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 8f8de3d4d6b7..cd7cd489f739 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -36,6 +36,7 @@ struct sugov_policy {
 	u64 last_freq_update_time;
 	s64 freq_update_delay_ns;
 	unsigned int next_freq;
+	unsigned int cached_raw_freq;
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
@@ -52,7 +53,6 @@ struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
 
-	unsigned int cached_raw_freq;
 	unsigned long iowait_boost;
 	unsigned long iowait_boost_max;
 	u64 last_update;
@@ -116,7 +116,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
- * @sg_cpu: schedutil cpu object to compute the new frequency for.
+ * @sg_policy: schedutil policy object to compute the new frequency for.
  * @util: Current CPU utilization.
  * @max: CPU capacity.
  *
@@ -136,19 +136,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
  * next_freq (as calculated above) is returned, subject to policy min/max and
  * cpufreq driver limitations.
  */
-static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util,
-				  unsigned long max)
+static unsigned int get_next_freq(struct sugov_policy *sg_policy,
+				  unsigned long util, unsigned long max)
 {
-	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned int freq = arch_scale_freq_invariant() ?
 				policy->cpuinfo.max_freq : policy->cur;
 
 	freq = (freq + (freq >> 2)) * util / max;
 
-	if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
 		return sg_policy->next_freq;
-	sg_cpu->cached_raw_freq = freq;
+	sg_policy->cached_raw_freq = freq;
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
@@ -213,7 +212,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	} else {
 		sugov_get_util(&util, &max);
 		sugov_iowait_boost(sg_cpu, &util, &max);
-		next_f = get_next_freq(sg_cpu, util, max);
+		next_f = get_next_freq(sg_policy, util, max);
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }
@@ -267,7 +266,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 		sugov_iowait_boost(j_sg_cpu, &util, &max);
 	}
 
-	return get_next_freq(sg_cpu, util, max);
+	return get_next_freq(sg_policy, util, max);
 }
 
 static void sugov_update_shared(struct update_util_data *hook, u64 time,
@@ -580,6 +579,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->next_freq = UINT_MAX;
 	sg_policy->work_in_progress = false;
 	sg_policy->need_freq_update = false;
+	sg_policy->cached_raw_freq = 0;
 
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
@@ -590,7 +590,6 @@ static int sugov_start(struct cpufreq_policy *policy)
 		sg_cpu->max = 0;
 		sg_cpu->flags = SCHED_CPUFREQ_RT;
 		sg_cpu->last_update = 0;
-		sg_cpu->cached_raw_freq = 0;
 		sg_cpu->iowait_boost = 0;
 		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
 		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
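
The frequency formula in get_next_freq() above is easy to misread, so the following hedged, userspace-style sketch just works the arithmetic with made-up numbers, assuming arch_scale_freq_invariant() is true so the base is policy->cpuinfo.max_freq. It illustrates the calculation only and is not code from this patch.

/* Hedged illustration of: freq = (freq + (freq >> 2)) * util / max,
 * i.e. next_freq ~= 1.25 * max_freq * util / max. Numbers are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned long max_freq = 2000000;	/* kHz, hypothetical policy max */
	unsigned long util = 512;		/* current utilization */
	unsigned long max = 1024;		/* CPU capacity scale */
	unsigned long freq = (max_freq + (max_freq >> 2)) * util / max;

	printf("raw next_freq = %lu kHz\n", freq);	/* prints 1250000 */
	return 0;
}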
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e88b35ac157..dea138964b91 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..1b3c8189b286 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4d2ea6f25568..b8c84c6dee64 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
 
+/*
+ * Note! These two wait functions are entered with the
+ * wait-queue lock held (and interrupts off in the _irq
+ * case), so there is no race with testing the wakeup
+ * condition in the caller before they add the wait
+ * entry to the wake queue.
+ */
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+	if (likely(list_empty(&wait->task_list)))
+		__add_wait_queue_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock(&wq->lock);
+	schedule();
+	spin_lock(&wq->lock);
+	return 0;
+}
+EXPORT_SYMBOL(do_wait_intr);
+
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+	if (likely(list_empty(&wait->task_list)))
+		__add_wait_queue_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock_irq(&wq->lock);
+	schedule();
+	spin_lock_irq(&wq->lock);
+	return 0;
+}
+EXPORT_SYMBOL(do_wait_intr_irq);
+
 /**
  * finish_wait - clean up after waiting in a queue
  * @q: waitqueue waited on
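
For context on the two helpers added above: they must be entered with wq->lock held, they queue the caller, and they drop and retake the lock only around schedule(). A hedged sketch of one plausible caller follows; the wait-queue head, the my_cond() predicate and the function itself are illustrative assumptions, not taken from this patch.

/* Hedged usage sketch (not from this patch). my_cond() is a hypothetical
 * predicate that is made true, and the queue woken, under wq->lock elsewhere.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static bool my_cond(void);

static int wait_for_my_cond(wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&wq->lock);			/* do_wait_intr() requires the lock */
	while (!my_cond()) {
		ret = do_wait_intr(wq, &wait);	/* drops/retakes wq->lock around schedule() */
		if (ret)			/* -ERESTARTSYS on a pending signal */
			break;
	}
	__remove_wait_queue(wq, &wait);		/* lock is held, so the __ variant */
	__set_current_state(TASK_RUNNING);
	spin_unlock(&wq->lock);
	return ret;
}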
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7906b3f0c41a..497719127bf9 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -125,7 +125,7 @@ int register_refined_jiffies(long cycles_per_second)
 	shift_hz += cycles_per_tick/2;
 	do_div(shift_hz, cycles_per_tick);
 	/* Calculate nsec_per_tick using shift_hz */
-	nsec_per_tick = (u64)TICK_NSEC << 8;
+	nsec_per_tick = (u64)NSEC_PER_SEC << 8;
 	nsec_per_tick += (u32)shift_hz/2;
 	do_div(nsec_per_tick, (u32)shift_hz);
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d1597c9ee30..b9691ee8f6c1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4416,16 +4416,24 @@ static int __init set_graph_notrace_function(char *str)
 }
 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
 
+static int __init set_graph_max_depth_function(char *str)
+{
+	if (!str)
+		return 0;
+	fgraph_max_depth = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
+
 static void __init set_ftrace_early_graph(char *buf, int enable)
 {
 	int ret;
 	char *func;
 	struct ftrace_hash *hash;
 
-	if (enable)
-		hash = ftrace_graph_hash;
-	else
-		hash = ftrace_graph_notrace_hash;
+	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+	if (WARN_ON(!hash))
+		return;
 
 	while (buf) {
 		func = strsep(&buf, ",");
@@ -4435,6 +4443,11 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
 			printk(KERN_DEBUG "ftrace: function %s not "
 			       "traceable\n", func);
 	}
+
+	if (enable)
+		ftrace_graph_hash = hash;
+	else
+		ftrace_graph_notrace_hash = hash;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -5488,7 +5501,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
  * Normally the mcount trampoline will call the ops->func, but there
  * are times that it should not. For example, if the ops does not
  * have its own recursion protection, then it should call the
- * ftrace_ops_recurs_func() instead.
+ * ftrace_ops_assist_func() instead.
  *
  * Returns the function that the trampoline should call for @ops.
  */
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 1d68b5b7ad41..5fb1f2c87e6b 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -65,7 +65,7 @@ void stack_trace_print(void)
 }
 
 /*
- * When arch-specific code overides this function, the following
+ * When arch-specific code overrides this function, the following
  * data should be filled up, assuming stack_trace_max_lock is held to
  * prevent concurrent updates.
  * stack_trace_index[]
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 62630a40ab3a..b4eeee03934f 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 
 	new->ns = ns;
 	new->uid = uid;
-	atomic_set(&new->count, 0);
+	new->count = 0;
 
 	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 			ucounts = new;
 		}
 	}
-	if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+	if (ucounts->count == INT_MAX)
 		ucounts = NULL;
+	else
+		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
 	return ucounts;
 }
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;
 
-	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock_irqsave(&ucounts_lock, flags);
+	spin_lock_irqsave(&ucounts_lock, flags);
+	ucounts->count -= 1;
+	if (!ucounts->count)
 		hlist_del_init(&ucounts->node);
-		spin_unlock_irqrestore(&ucounts_lock, flags);
+	else
+		ucounts = NULL;
+	spin_unlock_irqrestore(&ucounts_lock, flags);
 
-		kfree(ucounts);
-	}
+	kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 072cbc9b175d..c0168b7da1ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1507,6 +1507,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
+	WARN_ON_ONCE(!wq);
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
 	WARN_ON_ONCE(timer_pending(timer));