author    Jason Gunthorpe <jgg@mellanox.com>  2018-09-05 18:21:22 -0400
committer Jason Gunthorpe <jgg@mellanox.com>  2018-09-05 18:21:22 -0400
commit    2c910cb75e1fe6de52d95c8e32caedd1629a33a5 (patch)
tree      94a0eea6f8cde689d11e7583ddd0a930b8785ab4 /kernel
parent    627212c9d49ba2759b699450f5d8f45f73e062fa (diff)
parent    b53b1c08a23eb1091982daacb2122f90a7094a77 (diff)
Merge branch 'uverbs_dev_cleanups' into rdma.git for-next
For dependencies, branch based on rdma.git 'for-rc' of
https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git/

Pull 'uverbs_dev_cleanups' from Leon Romanovsky:

====================
Reuse the char device code interfaces to simplify ib_uverbs_device
creation and destruction. As part of this series, we are sending a fix
to the cleanup path, which was discovered during internal review. The
fix can definitely go to -rc, but it means that this series depends on
rdma-rc.
====================

* branch 'uverbs_dev_cleanups':
  RDMA/uverbs: Use device.groups to initialize device attributes
  RDMA/uverbs: Use cdev_device_add() instead of cdev_add()
  RDMA/core: Depend on device_add() to add device attributes
  RDMA/uverbs: Fix error cleanup path of ib_uverbs_add_one()

Resolved conflict in ib_device_unregister_sysfs()

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
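One change the pull highlights is the switch from cdev_add() to cdev_device_add(). That helper (fs/char_dev.c) performs cdev_add() and device_add() as a single operation and unwinds the cdev when device_add() fails, which is what lets ib_uverbs_device creation collapse into one registration call with one error path. A minimal sketch of the pattern follows; the my_dev structure and my_dev_* functions are hypothetical stand-ins, not the uverbs code itself:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>

struct my_dev {				/* hypothetical driver state */
	struct device	dev;
	struct cdev	cdev;
};

static int my_dev_register(struct my_dev *md, dev_t devt,
			   const struct file_operations *fops)
{
	device_initialize(&md->dev);	/* pairs with the final put_device() */
	md->dev.devt = devt;		/* cdev_device_add() uses dev->devt */

	cdev_init(&md->cdev, fops);
	/* One call replaces the old cdev_add() + device_add() pair and
	 * cleans up the cdev itself if device_add() fails. */
	return cdev_device_add(&md->cdev, &md->dev);
}

static void my_dev_unregister(struct my_dev *md)
{
	cdev_device_del(&md->cdev, &md->dev);	/* mirrors the add */
	put_device(&md->dev);
}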
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/hashtab.c    23
-rw-r--r--  kernel/bpf/sockmap.c    11
-rw-r--r--  kernel/cpu.c            26
-rw-r--r--  kernel/printk/printk.c   1
-rw-r--r--  kernel/watchdog.c        4
-rw-r--r--  kernel/watchdog_hld.c    2
-rw-r--r--  kernel/workqueue.c       2
7 files changed, 30 insertions(+), 39 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 04b8eda94e7d..03cc59ee9c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
 	atomic_t count;	/* number of elements in this hashtable */
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
+	u32 hashrnd;
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_htab;
 
+	htab->hashrnd = get_random_int();
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
 		raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
 	return ERR_PTR(err);
 }
 
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 {
-	return jhash(key, key_len, 0);
+	return jhash(key, key_len, hashrnd);
 }
 
 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	head = select_bucket(htab, hash);
 
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	if (!key)
 		goto find_first_elem;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	head = select_bucket(htab, hash);
 
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 
 	b = __select_bucket(htab, hash);
 	head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 
 	key_size = map->key_size;
 
-	hash = htab_map_hash(key, key_size);
+	hash = htab_map_hash(key, key_size, htab->hashrnd);
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
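The hashtab.c change seeds jhash() with a per-map get_random_int() value instead of the constant 0, so the mapping from key to bucket is no longer predictable across maps or boots and colliding keys cannot be precomputed. A self-contained userspace sketch of the idea, with a toy FNV-1a-style hash standing in for the kernel's jhash():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Toy stand-in for the kernel's jhash(key, len, initval). */
static uint32_t toy_hash(const void *key, uint32_t len, uint32_t seed)
{
	const unsigned char *p = key;
	uint32_t h = 2166136261u ^ seed;	/* fold the seed into the state */

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	/* Per-table seed, as htab_map_alloc() now does via get_random_int(). */
	srand((unsigned)time(NULL));
	uint32_t hashrnd = (uint32_t)rand();
	uint32_t n_buckets = 16;		/* power of two, as in bpf_htab */
	int key = 42;

	/* With a fixed seed, an observer who knows the hash function can
	 * construct keys that all land in one bucket; the random seed
	 * makes the bucket index unpredictable. */
	uint32_t hash = toy_hash(&key, sizeof(key), hashrnd);
	printf("key %d -> bucket %u\n", key, hash & (n_buckets - 1));
	return 0;
}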
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98e621a29e8e..cf5195c7c331 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1427,12 +1427,15 @@ out:
 static void smap_write_space(struct sock *sk)
 {
 	struct smap_psock *psock;
+	void (*write_space)(struct sock *sk);
 
 	rcu_read_lock();
 	psock = smap_psock_sk(sk);
 	if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
 		schedule_work(&psock->tx_work);
+	write_space = psock->save_write_space;
 	rcu_read_unlock();
+	write_space(sk);
 }
 
 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		return ERR_PTR(-EPERM);
 
 	/* check sanity of attributes */
-	if (attr->max_entries == 0 || attr->value_size != 4 ||
+	if (attr->max_entries == 0 ||
+	    attr->key_size == 0 ||
+	    attr->value_size != 4 ||
 	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
@@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
 	}
 	l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
 			     htab->map.numa_node);
-	if (!l_new)
+	if (!l_new) {
+		atomic_dec(&htab->count);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	memcpy(l_new->key, key, key_size);
 	l_new->sk = sk;
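Two of the sockmap.c hunks are straight bug fixes: smap_write_space() now snapshots the saved callback under the RCU read lock and invokes the socket's original write_space after unlocking, and alloc_sock_hash_elem() undoes its element-count increment when kmalloc_node() fails. The counter pattern is worth spelling out: the count is raised before the allocation, so every failure path must drop it again or the table's occupancy creeps up until inserts fail forever. A hedged userspace sketch using C11 atomics; the toy_* names are hypothetical:

#include <stdatomic.h>
#include <stdlib.h>

struct toy_htab {
	atomic_int count;	/* current number of elements */
	int max_entries;	/* capacity limit */
	size_t elem_size;
};

static void *toy_alloc_elem(struct toy_htab *htab)
{
	/* Reserve a slot first, as the sockmap code does. */
	if (atomic_fetch_add(&htab->count, 1) >= htab->max_entries) {
		atomic_fetch_sub(&htab->count, 1);	/* over limit: undo */
		return NULL;
	}

	void *elem = malloc(htab->elem_size);
	if (!elem) {
		/* The fixed bug: returning without this undo leaked a
		 * reserved slot on every allocation failure. */
		atomic_fetch_sub(&htab->count, 1);
		return NULL;
	}
	return elem;
}

int main(void)
{
	struct toy_htab htab = { .max_entries = 4, .elem_size = 64 };
	free(toy_alloc_elem(&htab));
	return 0;
}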
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ed44d7d34c2d..aa7fe85ad62e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
  * @name:	Name of the step
  * @startup:	Startup function of the step
  * @teardown:	Teardown function of the step
- * @skip_onerr:	Do not invoke the functions on error rollback
- *		Will go away once the notifiers are gone
  * @cant_stop:	Bringup/teardown can't be stopped at this step
  */
 struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
 			      struct hlist_node *node);
 	} teardown;
 	struct hlist_head	list;
-	bool			skip_onerr;
 	bool			cant_stop;
 	bool			multi_instance;
 };
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
 
 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-	for (st->state--; st->state > st->target; st->state--) {
-		struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
-	}
+	for (st->state--; st->state > st->target; st->state--)
+		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
 
 	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 
-	if (st->rollback) {
-		struct cpuhp_step *step = cpuhp_get_step(state);
-		if (step->skip_onerr)
-			goto next;
-	}
-
 	if (cpuhp_is_atomic_state(state)) {
 		local_irq_disable();
 		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		st->should_run = false;
 	}
 
-next:
 	cpuhp_lock_release(bringup);
 
 	if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
 
 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-	for (st->state++; st->state < st->target; st->state++) {
-		struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-	}
+	for (st->state++; st->state < st->target; st->state++)
+		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 }
 
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
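With skip_onerr gone, undo_cpu_up() and undo_cpu_down() collapse into plain loops that unconditionally replay every already-completed hotplug state back toward the target. A standalone sketch of the up-rollback shape; the toy_* states and callbacks are hypothetical, not the kernel's cpuhp tables:

#include <stdio.h>

enum toy_state { ST_OFFLINE, ST_PREPARED, ST_STARTED, ST_ONLINE };

static void toy_teardown(int cpu, enum toy_state state)
{
	printf("cpu%d: tearing down state %d\n", cpu, state);
}

/* Mirrors undo_cpu_up(): step below the failed state, then tear down
 * every state above the target with no per-step skip check. */
static void toy_undo_up(int cpu, enum toy_state *state, enum toy_state target)
{
	for ((*state)--; *state > target; (*state)--)
		toy_teardown(cpu, *state);
}

int main(void)
{
	enum toy_state state = ST_STARTED;	/* bring-up failed here */
	toy_undo_up(0, &state, ST_OFFLINE);
	return 0;
}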
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 924e37fb1620..fd6f8ed28e01 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -38,7 +38,6 @@
 #include <linux/kmsg_dump.h>
 #include <linux/syslog.h>
 #include <linux/cpu.h>
-#include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
 #include <linux/irq_work.h>
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5470dce212c0..977918d5d350 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
  * entering idle state. This should only be used for scheduler events.
  * Use touch_softlockup_watchdog() for everything else.
  */
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
 {
 	/*
 	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
 	raw_cpu_write(watchdog_touch_ts, 0);
 }
 
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
 {
 	touch_softlockup_watchdog_sched();
 	wq_watchdog_touch(raw_smp_processor_id());
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 1f7020d65d0a..71381168dede 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
 static unsigned long hardlockup_allcpu_dumped;
 static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
 {
 	/*
 	 * Using __raw here because some code paths have
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 60e80198c3df..0280deac392e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
 
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
 {
 	if (cpu >= 0)
 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
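The watchdog.c, watchdog_hld.c, and workqueue.c hunks all add the same annotation: notrace, which (roughly, and architecture/config dependent) expands to __attribute__((no_instrument_function)) and keeps a function out of function-entry instrumentation. That matters for watchdog-touch helpers, which are called from tracing and NMI paths where instrumenting the helper itself can recurse. A userspace approximation using gcc's -finstrument-functions; the function names below are illustrative:

#include <stdio.h>

#define notrace __attribute__((no_instrument_function))

/* gcc calls these hooks around every instrumented function; they must
 * be notrace themselves or they would recurse endlessly. */
notrace void __cyg_profile_func_enter(void *fn, void *site)
{
	fprintf(stderr, "enter %p\n", fn);
}

notrace void __cyg_profile_func_exit(void *fn, void *site)
{
	fprintf(stderr, "exit  %p\n", fn);
}

void traced_touch(void) { }		/* appears in the trace */
notrace void quiet_touch(void) { }	/* stays out of it */

int main(void)
{
	traced_touch();
	quiet_touch();
	return 0;
}

Built with gcc -finstrument-functions demo.c, the run logs enter/exit for main() and traced_touch() but nothing for quiet_touch(), which is the property the kernel needs from its touch helpers.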