diff options
author | David S. Miller <davem@davemloft.net> | 2017-12-11 09:58:39 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-12-11 09:58:39 -0500 |
commit | 9944a0f2f502e4501fccb1dc0a64a6012c83dd97 (patch) | |
tree | 98769c3324dd57bc2070c08852c9bf1f272ba2b8 | |
parent | a0b586fa75a69578ecf10b40582eed9b35de2432 (diff) | |
parent | 64e0cd0d3540dbbdf6661943025409e6b31d5178 (diff) |
Merge branch 'rhashtable-New-features-in-walk-and-bucket'
Tom Herbert says:
====================
rhashtable: New features in walk and bucket
This patch set contains some changes related to rhashtable:
- Allow rhashtable_walk_start to return void
- Add a function to peek at the next entry during a walk
- Abstract out a function to compute a hash for a table
- A library function to allocate a bucket array of spinlocks
- Call the above function for rhashtable locks allocation
Tested: Exercised using various operations on an ILA xlat
table.
v2:
- Apply feedback from Herbert. Don't change semantics of resize
event reporting and -EAGAIN, just simplify API for callers that
ignore those.
- Add end_of_table in iter to reliably tell when the iterator has
reached the end.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 7 | ||||
-rw-r--r-- | fs/gfs2/glock.c | 7 | ||||
-rw-r--r-- | include/linux/rhashtable.h | 38 | ||||
-rw-r--r-- | include/linux/spinlock.h | 6 | ||||
-rw-r--r-- | include/net/sctp/sctp.h | 2 | ||||
-rw-r--r-- | lib/Makefile | 2 | ||||
-rw-r--r-- | lib/bucket_locks.c | 54 | ||||
-rw-r--r-- | lib/rhashtable.c | 160 | ||||
-rw-r--r-- | lib/test_rhashtable.c | 6 | ||||
-rw-r--r-- | net/ipv6/ila/ila_xlat.c | 4 | ||||
-rw-r--r-- | net/ipv6/seg6.c | 4 | ||||
-rw-r--r-- | net/mac80211/mesh_pathtbl.c | 34 | ||||
-rw-r--r-- | net/netfilter/nft_set_hash.c | 10 | ||||
-rw-r--r-- | net/netlink/af_netlink.c | 5 | ||||
-rw-r--r-- | net/netlink/diag.c | 8 | ||||
-rw-r--r-- | net/sctp/proc.c | 6 | ||||
-rw-r--r-- | net/sctp/socket.c | 19 | ||||
-rw-r--r-- | net/tipc/socket.c | 6 |
19 files changed, 224 insertions, 160 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 9807214da206..2ae5ed151369 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
@@ -1412,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp, | |||
1412 | void *flow_node; | 1412 | void *flow_node; |
1413 | int rc, i; | 1413 | int rc, i; |
1414 | 1414 | ||
1415 | rc = rhashtable_walk_start(iter); | 1415 | rhashtable_walk_start(iter); |
1416 | if (rc && rc != -EAGAIN) { | ||
1417 | i = 0; | ||
1418 | goto done; | ||
1419 | } | ||
1420 | 1416 | ||
1421 | rc = 0; | 1417 | rc = 0; |
1422 | for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) { | 1418 | for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index a12b894f135d..9b9f3f99b39d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | |||
@@ -763,9 +763,7 @@ static void ch_flower_stats_handler(struct work_struct *work) | |||
763 | 763 | ||
764 | rhashtable_walk_enter(&adap->flower_tbl, &iter); | 764 | rhashtable_walk_enter(&adap->flower_tbl, &iter); |
765 | do { | 765 | do { |
766 | flower_entry = ERR_PTR(rhashtable_walk_start(&iter)); | 766 | rhashtable_walk_start(&iter); |
767 | if (IS_ERR(flower_entry)) | ||
768 | goto walk_stop; | ||
769 | 767 | ||
770 | while ((flower_entry = rhashtable_walk_next(&iter)) && | 768 | while ((flower_entry = rhashtable_walk_next(&iter)) && |
771 | !IS_ERR(flower_entry)) { | 769 | !IS_ERR(flower_entry)) { |
@@ -784,8 +782,9 @@ static void ch_flower_stats_handler(struct work_struct *work) | |||
784 | spin_unlock(&flower_entry->lock); | 782 | spin_unlock(&flower_entry->lock); |
785 | } | 783 | } |
786 | } | 784 | } |
787 | walk_stop: | 785 | |
788 | rhashtable_walk_stop(&iter); | 786 | rhashtable_walk_stop(&iter); |
787 | |||
789 | } while (flower_entry == ERR_PTR(-EAGAIN)); | 788 | } while (flower_entry == ERR_PTR(-EAGAIN)); |
790 | rhashtable_walk_exit(&iter); | 789 | rhashtable_walk_exit(&iter); |
791 | mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); | 790 | mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 11066d8647d2..90af87ff29ba 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1549,16 +1549,13 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) | |||
1549 | rhashtable_walk_enter(&gl_hash_table, &iter); | 1549 | rhashtable_walk_enter(&gl_hash_table, &iter); |
1550 | 1550 | ||
1551 | do { | 1551 | do { |
1552 | gl = ERR_PTR(rhashtable_walk_start(&iter)); | 1552 | rhashtable_walk_start(&iter); |
1553 | if (IS_ERR(gl)) | ||
1554 | goto walk_stop; | ||
1555 | 1553 | ||
1556 | while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) | 1554 | while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) |
1557 | if (gl->gl_name.ln_sbd == sdp && | 1555 | if (gl->gl_name.ln_sbd == sdp && |
1558 | lockref_get_not_dead(&gl->gl_lockref)) | 1556 | lockref_get_not_dead(&gl->gl_lockref)) |
1559 | examiner(gl); | 1557 | examiner(gl); |
1560 | 1558 | ||
1561 | walk_stop: | ||
1562 | rhashtable_walk_stop(&iter); | 1559 | rhashtable_walk_stop(&iter); |
1563 | } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); | 1560 | } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); |
1564 | 1561 | ||
@@ -1947,7 +1944,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) | |||
1947 | loff_t n = *pos; | 1944 | loff_t n = *pos; |
1948 | 1945 | ||
1949 | rhashtable_walk_enter(&gl_hash_table, &gi->hti); | 1946 | rhashtable_walk_enter(&gl_hash_table, &gi->hti); |
1950 | if (rhashtable_walk_start(&gi->hti) != 0) | 1947 | if (rhashtable_walk_start_check(&gi->hti) != 0) |
1951 | return NULL; | 1948 | return NULL; |
1952 | 1949 | ||
1953 | do { | 1950 | do { |
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 361c08e35dbc..c9df2527e0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
@@ -207,6 +207,7 @@ struct rhashtable_iter { | |||
207 | struct rhashtable_walker walker; | 207 | struct rhashtable_walker walker; |
208 | unsigned int slot; | 208 | unsigned int slot; |
209 | unsigned int skip; | 209 | unsigned int skip; |
210 | bool end_of_table; | ||
210 | }; | 211 | }; |
211 | 212 | ||
212 | static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) | 213 | static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) |
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, | |||
239 | return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); | 240 | return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); |
240 | } | 241 | } |
241 | 242 | ||
242 | static inline unsigned int rht_key_hashfn( | 243 | static inline unsigned int rht_key_get_hash(struct rhashtable *ht, |
243 | struct rhashtable *ht, const struct bucket_table *tbl, | 244 | const void *key, const struct rhashtable_params params, |
244 | const void *key, const struct rhashtable_params params) | 245 | unsigned int hash_rnd) |
245 | { | 246 | { |
246 | unsigned int hash; | 247 | unsigned int hash; |
247 | 248 | ||
248 | /* params must be equal to ht->p if it isn't constant. */ | 249 | /* params must be equal to ht->p if it isn't constant. */ |
249 | if (!__builtin_constant_p(params.key_len)) | 250 | if (!__builtin_constant_p(params.key_len)) |
250 | hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); | 251 | hash = ht->p.hashfn(key, ht->key_len, hash_rnd); |
251 | else if (params.key_len) { | 252 | else if (params.key_len) { |
252 | unsigned int key_len = params.key_len; | 253 | unsigned int key_len = params.key_len; |
253 | 254 | ||
254 | if (params.hashfn) | 255 | if (params.hashfn) |
255 | hash = params.hashfn(key, key_len, tbl->hash_rnd); | 256 | hash = params.hashfn(key, key_len, hash_rnd); |
256 | else if (key_len & (sizeof(u32) - 1)) | 257 | else if (key_len & (sizeof(u32) - 1)) |
257 | hash = jhash(key, key_len, tbl->hash_rnd); | 258 | hash = jhash(key, key_len, hash_rnd); |
258 | else | 259 | else |
259 | hash = jhash2(key, key_len / sizeof(u32), | 260 | hash = jhash2(key, key_len / sizeof(u32), hash_rnd); |
260 | tbl->hash_rnd); | ||
261 | } else { | 261 | } else { |
262 | unsigned int key_len = ht->p.key_len; | 262 | unsigned int key_len = ht->p.key_len; |
263 | 263 | ||
264 | if (params.hashfn) | 264 | if (params.hashfn) |
265 | hash = params.hashfn(key, key_len, tbl->hash_rnd); | 265 | hash = params.hashfn(key, key_len, hash_rnd); |
266 | else | 266 | else |
267 | hash = jhash(key, key_len, tbl->hash_rnd); | 267 | hash = jhash(key, key_len, hash_rnd); |
268 | } | 268 | } |
269 | 269 | ||
270 | return hash; | ||
271 | } | ||
272 | |||
273 | static inline unsigned int rht_key_hashfn( | ||
274 | struct rhashtable *ht, const struct bucket_table *tbl, | ||
275 | const void *key, const struct rhashtable_params params) | ||
276 | { | ||
277 | unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); | ||
278 | |||
270 | return rht_bucket_index(tbl, hash); | 279 | return rht_bucket_index(tbl, hash); |
271 | } | 280 | } |
272 | 281 | ||
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, | |||
378 | void rhashtable_walk_enter(struct rhashtable *ht, | 387 | void rhashtable_walk_enter(struct rhashtable *ht, |
379 | struct rhashtable_iter *iter); | 388 | struct rhashtable_iter *iter); |
380 | void rhashtable_walk_exit(struct rhashtable_iter *iter); | 389 | void rhashtable_walk_exit(struct rhashtable_iter *iter); |
381 | int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); | 390 | int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); |
391 | |||
392 | static inline void rhashtable_walk_start(struct rhashtable_iter *iter) | ||
393 | { | ||
394 | (void)rhashtable_walk_start_check(iter); | ||
395 | } | ||
396 | |||
382 | void *rhashtable_walk_next(struct rhashtable_iter *iter); | 397 | void *rhashtable_walk_next(struct rhashtable_iter *iter); |
398 | void *rhashtable_walk_peek(struct rhashtable_iter *iter); | ||
383 | void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); | 399 | void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); |
384 | 400 | ||
385 | void rhashtable_free_and_destroy(struct rhashtable *ht, | 401 | void rhashtable_free_and_destroy(struct rhashtable *ht, |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a39186194cd6..10fd28b118ee 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -414,4 +414,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |||
414 | #define atomic_dec_and_lock(atomic, lock) \ | 414 | #define atomic_dec_and_lock(atomic, lock) \ |
415 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) | 415 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
416 | 416 | ||
417 | int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, | ||
418 | size_t max_size, unsigned int cpu_mult, | ||
419 | gfp_t gfp); | ||
420 | |||
421 | void free_bucket_spinlocks(spinlock_t *locks); | ||
422 | |||
417 | #endif /* __LINUX_SPINLOCK_H */ | 423 | #endif /* __LINUX_SPINLOCK_H */ |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 906a9c0efa71..6f79415f6634 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated; | |||
116 | int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); | 116 | int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); |
117 | struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); | 117 | struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); |
118 | 118 | ||
119 | int sctp_transport_walk_start(struct rhashtable_iter *iter); | 119 | void sctp_transport_walk_start(struct rhashtable_iter *iter); |
120 | void sctp_transport_walk_stop(struct rhashtable_iter *iter); | 120 | void sctp_transport_walk_stop(struct rhashtable_iter *iter); |
121 | struct sctp_transport *sctp_transport_get_next(struct net *net, | 121 | struct sctp_transport *sctp_transport_get_next(struct net *net, |
122 | struct rhashtable_iter *iter); | 122 | struct rhashtable_iter *iter); |
diff --git a/lib/Makefile b/lib/Makefile index d11c48ec8ffd..a6c8529dd9b2 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ | |||
39 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ | 39 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
40 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ | 40 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ |
41 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ | 41 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ |
42 | once.o refcount.o usercopy.o errseq.o | 42 | once.o refcount.o usercopy.o errseq.o bucket_locks.o |
43 | obj-$(CONFIG_STRING_SELFTEST) += test_string.o | 43 | obj-$(CONFIG_STRING_SELFTEST) += test_string.o |
44 | obj-y += string_helpers.o | 44 | obj-y += string_helpers.o |
45 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 45 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c new file mode 100644 index 000000000000..266a97c5708b --- /dev/null +++ b/lib/bucket_locks.c | |||
@@ -0,0 +1,54 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/mm.h> | ||
4 | #include <linux/slab.h> | ||
5 | #include <linux/vmalloc.h> | ||
6 | |||
7 | /* Allocate an array of spinlocks to be accessed by a hash. Two arguments | ||
8 | * indicate the number of elements to allocate in the array. max_size | ||
9 | * gives the maximum number of elements to allocate. cpu_mult gives | ||
10 | * the number of locks per CPU to allocate. The size is rounded up | ||
11 | * to a power of 2 to be suitable as a hash table. | ||
12 | */ | ||
13 | |||
14 | int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask, | ||
15 | size_t max_size, unsigned int cpu_mult, gfp_t gfp) | ||
16 | { | ||
17 | spinlock_t *tlocks = NULL; | ||
18 | unsigned int i, size; | ||
19 | #if defined(CONFIG_PROVE_LOCKING) | ||
20 | unsigned int nr_pcpus = 2; | ||
21 | #else | ||
22 | unsigned int nr_pcpus = num_possible_cpus(); | ||
23 | #endif | ||
24 | |||
25 | if (cpu_mult) { | ||
26 | nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL); | ||
27 | size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size); | ||
28 | } else { | ||
29 | size = max_size; | ||
30 | } | ||
31 | |||
32 | if (sizeof(spinlock_t) != 0) { | ||
33 | if (gfpflags_allow_blocking(gfp)) | ||
34 | tlocks = kvmalloc(size * sizeof(spinlock_t), gfp); | ||
35 | else | ||
36 | tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp); | ||
37 | if (!tlocks) | ||
38 | return -ENOMEM; | ||
39 | for (i = 0; i < size; i++) | ||
40 | spin_lock_init(&tlocks[i]); | ||
41 | } | ||
42 | |||
43 | *locks = tlocks; | ||
44 | *locks_mask = size - 1; | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | EXPORT_SYMBOL(alloc_bucket_spinlocks); | ||
49 | |||
50 | void free_bucket_spinlocks(spinlock_t *locks) | ||
51 | { | ||
52 | kvfree(locks); | ||
53 | } | ||
54 | EXPORT_SYMBOL(free_bucket_spinlocks); | ||
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index ddd7dde87c3c..3825c30aaa36 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); | |||
65 | #define ASSERT_RHT_MUTEX(HT) | 65 | #define ASSERT_RHT_MUTEX(HT) |
66 | #endif | 66 | #endif |
67 | 67 | ||
68 | |||
69 | static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, | ||
70 | gfp_t gfp) | ||
71 | { | ||
72 | unsigned int i, size; | ||
73 | #if defined(CONFIG_PROVE_LOCKING) | ||
74 | unsigned int nr_pcpus = 2; | ||
75 | #else | ||
76 | unsigned int nr_pcpus = num_possible_cpus(); | ||
77 | #endif | ||
78 | |||
79 | nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL); | ||
80 | size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); | ||
81 | |||
82 | /* Never allocate more than 0.5 locks per bucket */ | ||
83 | size = min_t(unsigned int, size, tbl->size >> 1); | ||
84 | |||
85 | if (tbl->nest) | ||
86 | size = min(size, 1U << tbl->nest); | ||
87 | |||
88 | if (sizeof(spinlock_t) != 0) { | ||
89 | if (gfpflags_allow_blocking(gfp)) | ||
90 | tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp); | ||
91 | else | ||
92 | tbl->locks = kmalloc_array(size, sizeof(spinlock_t), | ||
93 | gfp); | ||
94 | if (!tbl->locks) | ||
95 | return -ENOMEM; | ||
96 | for (i = 0; i < size; i++) | ||
97 | spin_lock_init(&tbl->locks[i]); | ||
98 | } | ||
99 | tbl->locks_mask = size - 1; | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static void nested_table_free(union nested_table *ntbl, unsigned int size) | 68 | static void nested_table_free(union nested_table *ntbl, unsigned int size) |
105 | { | 69 | { |
106 | const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); | 70 | const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); |
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl) | |||
140 | if (tbl->nest) | 104 | if (tbl->nest) |
141 | nested_bucket_table_free(tbl); | 105 | nested_bucket_table_free(tbl); |
142 | 106 | ||
143 | kvfree(tbl->locks); | 107 | free_bucket_spinlocks(tbl->locks); |
144 | kvfree(tbl); | 108 | kvfree(tbl); |
145 | } | 109 | } |
146 | 110 | ||
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
207 | gfp_t gfp) | 171 | gfp_t gfp) |
208 | { | 172 | { |
209 | struct bucket_table *tbl = NULL; | 173 | struct bucket_table *tbl = NULL; |
210 | size_t size; | 174 | size_t size, max_locks; |
211 | int i; | 175 | int i; |
212 | 176 | ||
213 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | 177 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); |
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | |||
227 | 191 | ||
228 | tbl->size = size; | 192 | tbl->size = size; |
229 | 193 | ||
230 | if (alloc_bucket_locks(ht, tbl, gfp) < 0) { | 194 | max_locks = size >> 1; |
195 | if (tbl->nest) | ||
196 | max_locks = min_t(size_t, max_locks, 1U << tbl->nest); | ||
197 | |||
198 | if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks, | ||
199 | ht->p.locks_mul, gfp) < 0) { | ||
231 | bucket_table_free(tbl); | 200 | bucket_table_free(tbl); |
232 | return NULL; | 201 | return NULL; |
233 | } | 202 | } |
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) | |||
707 | iter->p = NULL; | 676 | iter->p = NULL; |
708 | iter->slot = 0; | 677 | iter->slot = 0; |
709 | iter->skip = 0; | 678 | iter->skip = 0; |
679 | iter->end_of_table = 0; | ||
710 | 680 | ||
711 | spin_lock(&ht->lock); | 681 | spin_lock(&ht->lock); |
712 | iter->walker.tbl = | 682 | iter->walker.tbl = |
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter) | |||
732 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | 702 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); |
733 | 703 | ||
734 | /** | 704 | /** |
735 | * rhashtable_walk_start - Start a hash table walk | 705 | * rhashtable_walk_start_check - Start a hash table walk |
736 | * @iter: Hash table iterator | 706 | * @iter: Hash table iterator |
737 | * | 707 | * |
738 | * Start a hash table walk at the current iterator position. Note that we take | 708 | * Start a hash table walk at the current iterator position. Note that we take |
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | |||
744 | * Returns -EAGAIN if resize event occured. Note that the iterator | 714 | * Returns -EAGAIN if resize event occured. Note that the iterator |
745 | * will rewind back to the beginning and you may use it immediately | 715 | * will rewind back to the beginning and you may use it immediately |
746 | * by calling rhashtable_walk_next. | 716 | * by calling rhashtable_walk_next. |
717 | * | ||
718 | * rhashtable_walk_start is defined as an inline variant that returns | ||
719 | * void. This is preferred in cases where the caller would ignore | ||
720 | * resize events and always continue. | ||
747 | */ | 721 | */ |
748 | int rhashtable_walk_start(struct rhashtable_iter *iter) | 722 | int rhashtable_walk_start_check(struct rhashtable_iter *iter) |
749 | __acquires(RCU) | 723 | __acquires(RCU) |
750 | { | 724 | { |
751 | struct rhashtable *ht = iter->ht; | 725 | struct rhashtable *ht = iter->ht; |
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) | |||
757 | list_del(&iter->walker.list); | 731 | list_del(&iter->walker.list); |
758 | spin_unlock(&ht->lock); | 732 | spin_unlock(&ht->lock); |
759 | 733 | ||
760 | if (!iter->walker.tbl) { | 734 | if (!iter->walker.tbl && !iter->end_of_table) { |
761 | iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); | 735 | iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); |
762 | return -EAGAIN; | 736 | return -EAGAIN; |
763 | } | 737 | } |
764 | 738 | ||
765 | return 0; | 739 | return 0; |
766 | } | 740 | } |
767 | EXPORT_SYMBOL_GPL(rhashtable_walk_start); | 741 | EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); |
768 | 742 | ||
769 | /** | 743 | /** |
770 | * rhashtable_walk_next - Return the next object and advance the iterator | 744 | * __rhashtable_walk_find_next - Find the next element in a table (or the first |
771 | * @iter: Hash table iterator | 745 | * one in case of a new walk). |
772 | * | 746 | * |
773 | * Note that you must call rhashtable_walk_stop when you are finished | 747 | * @iter: Hash table iterator |
774 | * with the walk. | ||
775 | * | 748 | * |
776 | * Returns the next object or NULL when the end of the table is reached. | 749 | * Returns the found object or NULL when the end of the table is reached. |
777 | * | 750 | * |
778 | * Returns -EAGAIN if resize event occured. Note that the iterator | 751 | * Returns -EAGAIN if resize event occurred. |
779 | * will rewind back to the beginning and you may continue to use it. | ||
780 | */ | 752 | */ |
781 | void *rhashtable_walk_next(struct rhashtable_iter *iter) | 753 | static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter) |
782 | { | 754 | { |
783 | struct bucket_table *tbl = iter->walker.tbl; | 755 | struct bucket_table *tbl = iter->walker.tbl; |
784 | struct rhlist_head *list = iter->list; | 756 | struct rhlist_head *list = iter->list; |
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter) | |||
786 | struct rhash_head *p = iter->p; | 758 | struct rhash_head *p = iter->p; |
787 | bool rhlist = ht->rhlist; | 759 | bool rhlist = ht->rhlist; |
788 | 760 | ||
789 | if (p) { | 761 | if (!tbl) |
790 | if (!rhlist || !(list = rcu_dereference(list->next))) { | 762 | return NULL; |
791 | p = rcu_dereference(p->next); | ||
792 | list = container_of(p, struct rhlist_head, rhead); | ||
793 | } | ||
794 | goto next; | ||
795 | } | ||
796 | 763 | ||
797 | for (; iter->slot < tbl->size; iter->slot++) { | 764 | for (; iter->slot < tbl->size; iter->slot++) { |
798 | int skip = iter->skip; | 765 | int skip = iter->skip; |
@@ -836,13 +803,90 @@ next: | |||
836 | iter->slot = 0; | 803 | iter->slot = 0; |
837 | iter->skip = 0; | 804 | iter->skip = 0; |
838 | return ERR_PTR(-EAGAIN); | 805 | return ERR_PTR(-EAGAIN); |
806 | } else { | ||
807 | iter->end_of_table = true; | ||
839 | } | 808 | } |
840 | 809 | ||
841 | return NULL; | 810 | return NULL; |
842 | } | 811 | } |
812 | |||
813 | /** | ||
814 | * rhashtable_walk_next - Return the next object and advance the iterator | ||
815 | * @iter: Hash table iterator | ||
816 | * | ||
817 | * Note that you must call rhashtable_walk_stop when you are finished | ||
818 | * with the walk. | ||
819 | * | ||
820 | * Returns the next object or NULL when the end of the table is reached. | ||
821 | * | ||
822 | * Returns -EAGAIN if resize event occurred. Note that the iterator | ||
823 | * will rewind back to the beginning and you may continue to use it. | ||
824 | */ | ||
825 | void *rhashtable_walk_next(struct rhashtable_iter *iter) | ||
826 | { | ||
827 | struct rhlist_head *list = iter->list; | ||
828 | struct rhashtable *ht = iter->ht; | ||
829 | struct rhash_head *p = iter->p; | ||
830 | bool rhlist = ht->rhlist; | ||
831 | |||
832 | if (p) { | ||
833 | if (!rhlist || !(list = rcu_dereference(list->next))) { | ||
834 | p = rcu_dereference(p->next); | ||
835 | list = container_of(p, struct rhlist_head, rhead); | ||
836 | } | ||
837 | if (!rht_is_a_nulls(p)) { | ||
838 | iter->skip++; | ||
839 | iter->p = p; | ||
840 | iter->list = list; | ||
841 | return rht_obj(ht, rhlist ? &list->rhead : p); | ||
842 | } | ||
843 | |||
844 | /* At the end of this slot, switch to next one and then find | ||
845 | * next entry from that point. | ||
846 | */ | ||
847 | iter->skip = 0; | ||
848 | iter->slot++; | ||
849 | } | ||
850 | |||
851 | return __rhashtable_walk_find_next(iter); | ||
852 | } | ||
843 | EXPORT_SYMBOL_GPL(rhashtable_walk_next); | 853 | EXPORT_SYMBOL_GPL(rhashtable_walk_next); |
844 | 854 | ||
845 | /** | 855 | /** |
856 | * rhashtable_walk_peek - Return the next object but don't advance the iterator | ||
857 | * @iter: Hash table iterator | ||
858 | * | ||
859 | * Returns the next object or NULL when the end of the table is reached. | ||
860 | * | ||
861 | * Returns -EAGAIN if resize event occurred. Note that the iterator | ||
862 | * will rewind back to the beginning and you may continue to use it. | ||
863 | */ | ||
864 | void *rhashtable_walk_peek(struct rhashtable_iter *iter) | ||
865 | { | ||
866 | struct rhlist_head *list = iter->list; | ||
867 | struct rhashtable *ht = iter->ht; | ||
868 | struct rhash_head *p = iter->p; | ||
869 | |||
870 | if (p) | ||
871 | return rht_obj(ht, ht->rhlist ? &list->rhead : p); | ||
872 | |||
873 | /* No object found in current iter, find next one in the table. */ | ||
874 | |||
875 | if (iter->skip) { | ||
876 | /* A nonzero skip value points to the next entry in the table | ||
877 | * beyond that last one that was found. Decrement skip so | ||
878 | * we find the current value. __rhashtable_walk_find_next | ||
879 | * will restore the original value of skip assuming that | ||
880 | * the table hasn't changed. | ||
881 | */ | ||
882 | iter->skip--; | ||
883 | } | ||
884 | |||
885 | return __rhashtable_walk_find_next(iter); | ||
886 | } | ||
887 | EXPORT_SYMBOL_GPL(rhashtable_walk_peek); | ||
888 | |||
889 | /** | ||
846 | * rhashtable_walk_stop - Finish a hash table walk | 890 | * rhashtable_walk_stop - Finish a hash table walk |
847 | * @iter: Hash table iterator | 891 | * @iter: Hash table iterator |
848 | * | 892 | * |
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 8e83cbdc049c..76d3667fdea2 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries) | |||
162 | return; | 162 | return; |
163 | } | 163 | } |
164 | 164 | ||
165 | err = rhashtable_walk_start(&hti); | 165 | rhashtable_walk_start(&hti); |
166 | if (err && err != -EAGAIN) { | ||
167 | pr_warn("Test failed: iterator failed: %d\n", err); | ||
168 | return; | ||
169 | } | ||
170 | 166 | ||
171 | while ((pos = rhashtable_walk_next(&hti))) { | 167 | while ((pos = rhashtable_walk_next(&hti))) { |
172 | if (PTR_ERR(pos) == -EAGAIN) { | 168 | if (PTR_ERR(pos) == -EAGAIN) { |
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 6eb5e68f112a..44c39c5f0638 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c | |||
@@ -512,9 +512,7 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
512 | struct ila_map *ila; | 512 | struct ila_map *ila; |
513 | int ret; | 513 | int ret; |
514 | 514 | ||
515 | ret = rhashtable_walk_start(rhiter); | 515 | rhashtable_walk_start(rhiter); |
516 | if (ret && ret != -EAGAIN) | ||
517 | goto done; | ||
518 | 516 | ||
519 | for (;;) { | 517 | for (;;) { |
520 | ila = rhashtable_walk_next(rhiter); | 518 | ila = rhashtable_walk_next(rhiter); |
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index c81407770956..7f5621d09571 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c | |||
@@ -306,9 +306,7 @@ static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb) | |||
306 | struct seg6_hmac_info *hinfo; | 306 | struct seg6_hmac_info *hinfo; |
307 | int ret; | 307 | int ret; |
308 | 308 | ||
309 | ret = rhashtable_walk_start(iter); | 309 | rhashtable_walk_start(iter); |
310 | if (ret && ret != -EAGAIN) | ||
311 | goto done; | ||
312 | 310 | ||
313 | for (;;) { | 311 | for (;;) { |
314 | hinfo = rhashtable_walk_next(iter); | 312 | hinfo = rhashtable_walk_next(iter); |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 86c8dfef56a4..a5125624a76d 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -257,9 +257,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) | |||
257 | if (ret) | 257 | if (ret) |
258 | return NULL; | 258 | return NULL; |
259 | 259 | ||
260 | ret = rhashtable_walk_start(&iter); | 260 | rhashtable_walk_start(&iter); |
261 | if (ret && ret != -EAGAIN) | ||
262 | goto err; | ||
263 | 261 | ||
264 | while ((mpath = rhashtable_walk_next(&iter))) { | 262 | while ((mpath = rhashtable_walk_next(&iter))) { |
265 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 263 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) |
@@ -269,7 +267,6 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) | |||
269 | if (i++ == idx) | 267 | if (i++ == idx) |
270 | break; | 268 | break; |
271 | } | 269 | } |
272 | err: | ||
273 | rhashtable_walk_stop(&iter); | 270 | rhashtable_walk_stop(&iter); |
274 | rhashtable_walk_exit(&iter); | 271 | rhashtable_walk_exit(&iter); |
275 | 272 | ||
@@ -513,9 +510,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
513 | if (ret) | 510 | if (ret) |
514 | return; | 511 | return; |
515 | 512 | ||
516 | ret = rhashtable_walk_start(&iter); | 513 | rhashtable_walk_start(&iter); |
517 | if (ret && ret != -EAGAIN) | ||
518 | goto out; | ||
519 | 514 | ||
520 | while ((mpath = rhashtable_walk_next(&iter))) { | 515 | while ((mpath = rhashtable_walk_next(&iter))) { |
521 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 516 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) |
@@ -535,7 +530,6 @@ void mesh_plink_broken(struct sta_info *sta) | |||
535 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); | 530 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); |
536 | } | 531 | } |
537 | } | 532 | } |
538 | out: | ||
539 | rhashtable_walk_stop(&iter); | 533 | rhashtable_walk_stop(&iter); |
540 | rhashtable_walk_exit(&iter); | 534 | rhashtable_walk_exit(&iter); |
541 | } | 535 | } |
@@ -584,9 +578,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
584 | if (ret) | 578 | if (ret) |
585 | return; | 579 | return; |
586 | 580 | ||
587 | ret = rhashtable_walk_start(&iter); | 581 | rhashtable_walk_start(&iter); |
588 | if (ret && ret != -EAGAIN) | ||
589 | goto out; | ||
590 | 582 | ||
591 | while ((mpath = rhashtable_walk_next(&iter))) { | 583 | while ((mpath = rhashtable_walk_next(&iter))) { |
592 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 584 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) |
@@ -597,7 +589,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
597 | if (rcu_access_pointer(mpath->next_hop) == sta) | 589 | if (rcu_access_pointer(mpath->next_hop) == sta) |
598 | __mesh_path_del(tbl, mpath); | 590 | __mesh_path_del(tbl, mpath); |
599 | } | 591 | } |
600 | out: | 592 | |
601 | rhashtable_walk_stop(&iter); | 593 | rhashtable_walk_stop(&iter); |
602 | rhashtable_walk_exit(&iter); | 594 | rhashtable_walk_exit(&iter); |
603 | } | 595 | } |
@@ -614,9 +606,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
614 | if (ret) | 606 | if (ret) |
615 | return; | 607 | return; |
616 | 608 | ||
617 | ret = rhashtable_walk_start(&iter); | 609 | rhashtable_walk_start(&iter); |
618 | if (ret && ret != -EAGAIN) | ||
619 | goto out; | ||
620 | 610 | ||
621 | while ((mpath = rhashtable_walk_next(&iter))) { | 611 | while ((mpath = rhashtable_walk_next(&iter))) { |
622 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 612 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) |
@@ -627,7 +617,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
627 | if (ether_addr_equal(mpath->mpp, proxy)) | 617 | if (ether_addr_equal(mpath->mpp, proxy)) |
628 | __mesh_path_del(tbl, mpath); | 618 | __mesh_path_del(tbl, mpath); |
629 | } | 619 | } |
630 | out: | 620 | |
631 | rhashtable_walk_stop(&iter); | 621 | rhashtable_walk_stop(&iter); |
632 | rhashtable_walk_exit(&iter); | 622 | rhashtable_walk_exit(&iter); |
633 | } | 623 | } |
@@ -642,9 +632,7 @@ static void table_flush_by_iface(struct mesh_table *tbl) | |||
642 | if (ret) | 632 | if (ret) |
643 | return; | 633 | return; |
644 | 634 | ||
645 | ret = rhashtable_walk_start(&iter); | 635 | rhashtable_walk_start(&iter); |
646 | if (ret && ret != -EAGAIN) | ||
647 | goto out; | ||
648 | 636 | ||
649 | while ((mpath = rhashtable_walk_next(&iter))) { | 637 | while ((mpath = rhashtable_walk_next(&iter))) { |
650 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 638 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) |
@@ -653,7 +641,7 @@ static void table_flush_by_iface(struct mesh_table *tbl) | |||
653 | break; | 641 | break; |
654 | __mesh_path_del(tbl, mpath); | 642 | __mesh_path_del(tbl, mpath); |
655 | } | 643 | } |
656 | out: | 644 | |
657 | rhashtable_walk_stop(&iter); | 645 | rhashtable_walk_stop(&iter); |
658 | rhashtable_walk_exit(&iter); | 646 | rhashtable_walk_exit(&iter); |
659 | } | 647 | } |
@@ -873,9 +861,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, | |||
873 | if (ret) | 861 | if (ret) |
874 | return; | 862 | return; |
875 | 863 | ||
876 | ret = rhashtable_walk_start(&iter); | 864 | rhashtable_walk_start(&iter); |
877 | if (ret && ret != -EAGAIN) | ||
878 | goto out; | ||
879 | 865 | ||
880 | while ((mpath = rhashtable_walk_next(&iter))) { | 866 | while ((mpath = rhashtable_walk_next(&iter))) { |
881 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 867 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) |
@@ -887,7 +873,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, | |||
887 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) | 873 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
888 | __mesh_path_del(tbl, mpath); | 874 | __mesh_path_del(tbl, mpath); |
889 | } | 875 | } |
890 | out: | 876 | |
891 | rhashtable_walk_stop(&iter); | 877 | rhashtable_walk_stop(&iter); |
892 | rhashtable_walk_exit(&iter); | 878 | rhashtable_walk_exit(&iter); |
893 | } | 879 | } |
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index f8166c1d5430..3f1624ee056f 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -251,11 +251,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, | |||
251 | if (err) | 251 | if (err) |
252 | return; | 252 | return; |
253 | 253 | ||
254 | err = rhashtable_walk_start(&hti); | 254 | rhashtable_walk_start(&hti); |
255 | if (err && err != -EAGAIN) { | ||
256 | iter->err = err; | ||
257 | goto out; | ||
258 | } | ||
259 | 255 | ||
260 | while ((he = rhashtable_walk_next(&hti))) { | 256 | while ((he = rhashtable_walk_next(&hti))) { |
261 | if (IS_ERR(he)) { | 257 | if (IS_ERR(he)) { |
@@ -306,9 +302,7 @@ static void nft_rhash_gc(struct work_struct *work) | |||
306 | if (err) | 302 | if (err) |
307 | goto schedule; | 303 | goto schedule; |
308 | 304 | ||
309 | err = rhashtable_walk_start(&hti); | 305 | rhashtable_walk_start(&hti); |
310 | if (err && err != -EAGAIN) | ||
311 | goto out; | ||
312 | 306 | ||
313 | while ((he = rhashtable_walk_next(&hti))) { | 307 | while ((he = rhashtable_walk_next(&hti))) { |
314 | if (IS_ERR(he)) { | 308 | if (IS_ERR(he)) { |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index b9e0ee4e22f5..ab325d4d6fef 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2478,8 +2478,9 @@ static int netlink_walk_start(struct nl_seq_iter *iter) | |||
2478 | return err; | 2478 | return err; |
2479 | } | 2479 | } |
2480 | 2480 | ||
2481 | err = rhashtable_walk_start(&iter->hti); | 2481 | rhashtable_walk_start(&iter->hti); |
2482 | return err == -EAGAIN ? 0 : err; | 2482 | |
2483 | return 0; | ||
2483 | } | 2484 | } |
2484 | 2485 | ||
2485 | static void netlink_walk_stop(struct nl_seq_iter *iter) | 2486 | static void netlink_walk_stop(struct nl_seq_iter *iter) |
diff --git a/net/netlink/diag.c b/net/netlink/diag.c index 8faa20b4d457..7dda33b9b784 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c | |||
@@ -115,11 +115,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, | |||
115 | if (!s_num) | 115 | if (!s_num) |
116 | rhashtable_walk_enter(&tbl->hash, hti); | 116 | rhashtable_walk_enter(&tbl->hash, hti); |
117 | 117 | ||
118 | ret = rhashtable_walk_start(hti); | 118 | rhashtable_walk_start(hti); |
119 | if (ret == -EAGAIN) | ||
120 | ret = 0; | ||
121 | if (ret) | ||
122 | goto stop; | ||
123 | 119 | ||
124 | while ((nlsk = rhashtable_walk_next(hti))) { | 120 | while ((nlsk = rhashtable_walk_next(hti))) { |
125 | if (IS_ERR(nlsk)) { | 121 | if (IS_ERR(nlsk)) { |
@@ -146,8 +142,8 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, | |||
146 | } | 142 | } |
147 | } | 143 | } |
148 | 144 | ||
149 | stop: | ||
150 | rhashtable_walk_stop(hti); | 145 | rhashtable_walk_stop(hti); |
146 | |||
151 | if (ret) | 147 | if (ret) |
152 | goto done; | 148 | goto done; |
153 | 149 | ||
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 26b4be6b4172..4545bc2aff84 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -288,12 +288,8 @@ struct sctp_ht_iter { | |||
288 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | 288 | static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) |
289 | { | 289 | { |
290 | struct sctp_ht_iter *iter = seq->private; | 290 | struct sctp_ht_iter *iter = seq->private; |
291 | int err = sctp_transport_walk_start(&iter->hti); | ||
292 | 291 | ||
293 | if (err) { | 292 | sctp_transport_walk_start(&iter->hti); |
294 | iter->start_fail = 1; | ||
295 | return ERR_PTR(err); | ||
296 | } | ||
297 | 293 | ||
298 | iter->start_fail = 0; | 294 | iter->start_fail = 0; |
299 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); | 295 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index eb17a911aa29..3e55daa37e66 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4676,20 +4676,11 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, | |||
4676 | EXPORT_SYMBOL_GPL(sctp_get_sctp_info); | 4676 | EXPORT_SYMBOL_GPL(sctp_get_sctp_info); |
4677 | 4677 | ||
4678 | /* use callback to avoid exporting the core structure */ | 4678 | /* use callback to avoid exporting the core structure */ |
4679 | int sctp_transport_walk_start(struct rhashtable_iter *iter) | 4679 | void sctp_transport_walk_start(struct rhashtable_iter *iter) |
4680 | { | 4680 | { |
4681 | int err; | ||
4682 | |||
4683 | rhltable_walk_enter(&sctp_transport_hashtable, iter); | 4681 | rhltable_walk_enter(&sctp_transport_hashtable, iter); |
4684 | 4682 | ||
4685 | err = rhashtable_walk_start(iter); | 4683 | rhashtable_walk_start(iter); |
4686 | if (err && err != -EAGAIN) { | ||
4687 | rhashtable_walk_stop(iter); | ||
4688 | rhashtable_walk_exit(iter); | ||
4689 | return err; | ||
4690 | } | ||
4691 | |||
4692 | return 0; | ||
4693 | } | 4684 | } |
4694 | 4685 | ||
4695 | void sctp_transport_walk_stop(struct rhashtable_iter *iter) | 4686 | void sctp_transport_walk_stop(struct rhashtable_iter *iter) |
@@ -4780,12 +4771,10 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), | |||
4780 | struct net *net, int *pos, void *p) { | 4771 | struct net *net, int *pos, void *p) { |
4781 | struct rhashtable_iter hti; | 4772 | struct rhashtable_iter hti; |
4782 | struct sctp_transport *tsp; | 4773 | struct sctp_transport *tsp; |
4783 | int ret; | 4774 | int ret = 0; |
4784 | 4775 | ||
4785 | again: | 4776 | again: |
4786 | ret = sctp_transport_walk_start(&hti); | 4777 | sctp_transport_walk_start(&hti); |
4787 | if (ret) | ||
4788 | return ret; | ||
4789 | 4778 | ||
4790 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); | 4779 | tsp = sctp_transport_get_idx(net, &hti, *pos + 1); |
4791 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { | 4780 | for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 5d18c0caa92b..22c4fd8a9dfe 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2640,9 +2640,7 @@ void tipc_sk_reinit(struct net *net) | |||
2640 | rhashtable_walk_enter(&tn->sk_rht, &iter); | 2640 | rhashtable_walk_enter(&tn->sk_rht, &iter); |
2641 | 2641 | ||
2642 | do { | 2642 | do { |
2643 | tsk = ERR_PTR(rhashtable_walk_start(&iter)); | 2643 | rhashtable_walk_start(&iter); |
2644 | if (IS_ERR(tsk)) | ||
2645 | goto walk_stop; | ||
2646 | 2644 | ||
2647 | while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { | 2645 | while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { |
2648 | spin_lock_bh(&tsk->sk.sk_lock.slock); | 2646 | spin_lock_bh(&tsk->sk.sk_lock.slock); |
@@ -2651,7 +2649,7 @@ void tipc_sk_reinit(struct net *net) | |||
2651 | msg_set_orignode(msg, tn->own_addr); | 2649 | msg_set_orignode(msg, tn->own_addr); |
2652 | spin_unlock_bh(&tsk->sk.sk_lock.slock); | 2650 | spin_unlock_bh(&tsk->sk.sk_lock.slock); |
2653 | } | 2651 | } |
2654 | walk_stop: | 2652 | |
2655 | rhashtable_walk_stop(&iter); | 2653 | rhashtable_walk_stop(&iter); |
2656 | } while (tsk == ERR_PTR(-EAGAIN)); | 2654 | } while (tsk == ERR_PTR(-EAGAIN)); |
2657 | } | 2655 | } |