aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Graf <tgraf@suug.ch>2015-03-24 09:18:17 -0400
committerDavid S. Miller <davem@davemloft.net>2015-03-24 17:48:39 -0400
commit299e5c32a37a6bca8175db177117467bd1ce970a (patch)
treee625a03790b29449ad4992db6a3250305ff5831f
parent58be8a583d8d316448bafa5926414cfb83c02dec (diff)
rhashtable: Use 'unsigned int' consistently
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/rhashtable.h14
-rw-r--r--lib/rhashtable.c18
2 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5976ab59b88f..f89cda067cb9 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -208,13 +208,13 @@ static inline unsigned int rht_key_hashfn(
208 struct rhashtable *ht, const struct bucket_table *tbl, 208 struct rhashtable *ht, const struct bucket_table *tbl,
209 const void *key, const struct rhashtable_params params) 209 const void *key, const struct rhashtable_params params)
210{ 210{
211 unsigned hash; 211 unsigned int hash;
212 212
213 /* params must be equal to ht->p if it isn't constant. */ 213 /* params must be equal to ht->p if it isn't constant. */
214 if (!__builtin_constant_p(params.key_len)) 214 if (!__builtin_constant_p(params.key_len))
215 hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); 215 hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
216 else if (params.key_len) { 216 else if (params.key_len) {
217 unsigned key_len = params.key_len; 217 unsigned int key_len = params.key_len;
218 218
219 if (params.hashfn) 219 if (params.hashfn)
220 hash = params.hashfn(key, key_len, tbl->hash_rnd); 220 hash = params.hashfn(key, key_len, tbl->hash_rnd);
@@ -224,7 +224,7 @@ static inline unsigned int rht_key_hashfn(
224 hash = jhash2(key, key_len / sizeof(u32), 224 hash = jhash2(key, key_len / sizeof(u32),
225 tbl->hash_rnd); 225 tbl->hash_rnd);
226 } else { 226 } else {
227 unsigned key_len = ht->p.key_len; 227 unsigned int key_len = ht->p.key_len;
228 228
229 if (params.hashfn) 229 if (params.hashfn)
230 hash = params.hashfn(key, key_len, tbl->hash_rnd); 230 hash = params.hashfn(key, key_len, tbl->hash_rnd);
@@ -512,7 +512,7 @@ static inline void *rhashtable_lookup_fast(
512 }; 512 };
513 const struct bucket_table *tbl; 513 const struct bucket_table *tbl;
514 struct rhash_head *he; 514 struct rhash_head *he;
515 unsigned hash; 515 unsigned int hash;
516 516
517 rcu_read_lock(); 517 rcu_read_lock();
518 518
@@ -550,8 +550,8 @@ static inline int __rhashtable_insert_fast(
550 struct bucket_table *tbl, *new_tbl; 550 struct bucket_table *tbl, *new_tbl;
551 struct rhash_head *head; 551 struct rhash_head *head;
552 spinlock_t *lock; 552 spinlock_t *lock;
553 unsigned elasticity; 553 unsigned int elasticity;
554 unsigned hash; 554 unsigned int hash;
555 int err; 555 int err;
556 556
557restart: 557restart:
@@ -718,7 +718,7 @@ static inline int __rhashtable_remove_fast(
718 struct rhash_head __rcu **pprev; 718 struct rhash_head __rcu **pprev;
719 struct rhash_head *he; 719 struct rhash_head *he;
720 spinlock_t * lock; 720 spinlock_t * lock;
721 unsigned hash; 721 unsigned int hash;
722 int err = -ENOENT; 722 int err = -ENOENT;
723 723
724 hash = rht_head_hashfn(ht, tbl, obj, params); 724 hash = rht_head_hashfn(ht, tbl, obj, params);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 8514f7c5f029..50abe4fec4b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -153,7 +153,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
153 return new_tbl; 153 return new_tbl;
154} 154}
155 155
156static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash) 156static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
157{ 157{
158 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 158 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
159 struct bucket_table *new_tbl = rhashtable_last_table(ht, 159 struct bucket_table *new_tbl = rhashtable_last_table(ht,
@@ -162,7 +162,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
162 int err = -ENOENT; 162 int err = -ENOENT;
163 struct rhash_head *head, *next, *entry; 163 struct rhash_head *head, *next, *entry;
164 spinlock_t *new_bucket_lock; 164 spinlock_t *new_bucket_lock;
165 unsigned new_hash; 165 unsigned int new_hash;
166 166
167 rht_for_each(entry, old_tbl, old_hash) { 167 rht_for_each(entry, old_tbl, old_hash) {
168 err = 0; 168 err = 0;
@@ -199,7 +199,8 @@ out:
199 return err; 199 return err;
200} 200}
201 201
202static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash) 202static void rhashtable_rehash_chain(struct rhashtable *ht,
203 unsigned int old_hash)
203{ 204{
204 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 205 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
205 spinlock_t *old_bucket_lock; 206 spinlock_t *old_bucket_lock;
@@ -244,7 +245,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
244 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 245 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
245 struct bucket_table *new_tbl; 246 struct bucket_table *new_tbl;
246 struct rhashtable_walker *walker; 247 struct rhashtable_walker *walker;
247 unsigned old_hash; 248 unsigned int old_hash;
248 249
249 new_tbl = rht_dereference(old_tbl->future_tbl, ht); 250 new_tbl = rht_dereference(old_tbl->future_tbl, ht);
250 if (!new_tbl) 251 if (!new_tbl)
@@ -324,11 +325,12 @@ static int rhashtable_expand(struct rhashtable *ht)
324static int rhashtable_shrink(struct rhashtable *ht) 325static int rhashtable_shrink(struct rhashtable *ht)
325{ 326{
326 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 327 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
327 unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); 328 unsigned int size;
328 int err; 329 int err;
329 330
330 ASSERT_RHT_MUTEX(ht); 331 ASSERT_RHT_MUTEX(ht);
331 332
333 size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
332 if (size < ht->p.min_size) 334 if (size < ht->p.min_size)
333 size = ht->p.min_size; 335 size = ht->p.min_size;
334 336
@@ -379,9 +381,9 @@ unlock:
379 381
380static bool rhashtable_check_elasticity(struct rhashtable *ht, 382static bool rhashtable_check_elasticity(struct rhashtable *ht,
381 struct bucket_table *tbl, 383 struct bucket_table *tbl,
382 unsigned hash) 384 unsigned int hash)
383{ 385{
384 unsigned elasticity = ht->elasticity; 386 unsigned int elasticity = ht->elasticity;
385 struct rhash_head *head; 387 struct rhash_head *head;
386 388
387 rht_for_each(head, tbl, hash) 389 rht_for_each(head, tbl, hash)
@@ -431,7 +433,7 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
431 struct bucket_table *tbl) 433 struct bucket_table *tbl)
432{ 434{
433 struct rhash_head *head; 435 struct rhash_head *head;
434 unsigned hash; 436 unsigned int hash;
435 int err; 437 int err;
436 438
437 tbl = rhashtable_last_table(ht, tbl); 439 tbl = rhashtable_last_table(ht, tbl);