diff options
author | Thomas Graf <tgraf@suug.ch> | 2015-03-24 09:18:17 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-03-24 17:48:39 -0400 |
commit | 299e5c32a37a6bca8175db177117467bd1ce970a (patch) | |
tree | e625a03790b29449ad4992db6a3250305ff5831f /lib | |
parent | 58be8a583d8d316448bafa5926414cfb83c02dec (diff) |
rhashtable: Use 'unsigned int' consistently
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/rhashtable.c | 18 |
1 file changed, 10 insertions, 8 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 8514f7c5f029..50abe4fec4b8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -153,7 +153,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
153 | return new_tbl; | 153 | return new_tbl; |
154 | } | 154 | } |
155 | 155 | ||
156 | static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash) | 156 | static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) |
157 | { | 157 | { |
158 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); | 158 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
159 | struct bucket_table *new_tbl = rhashtable_last_table(ht, | 159 | struct bucket_table *new_tbl = rhashtable_last_table(ht, |
@@ -162,7 +162,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
162 | int err = -ENOENT; | 162 | int err = -ENOENT; |
163 | struct rhash_head *head, *next, *entry; | 163 | struct rhash_head *head, *next, *entry; |
164 | spinlock_t *new_bucket_lock; | 164 | spinlock_t *new_bucket_lock; |
165 | unsigned new_hash; | 165 | unsigned int new_hash; |
166 | 166 | ||
167 | rht_for_each(entry, old_tbl, old_hash) { | 167 | rht_for_each(entry, old_tbl, old_hash) { |
168 | err = 0; | 168 | err = 0; |
@@ -199,7 +199,8 @@ out:
199 | return err; | 199 | return err; |
200 | } | 200 | } |
201 | 201 | ||
202 | static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash) | 202 | static void rhashtable_rehash_chain(struct rhashtable *ht, |
203 | unsigned int old_hash) | ||
203 | { | 204 | { |
204 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); | 205 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
205 | spinlock_t *old_bucket_lock; | 206 | spinlock_t *old_bucket_lock; |
@@ -244,7 +245,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
244 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); | 245 | struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
245 | struct bucket_table *new_tbl; | 246 | struct bucket_table *new_tbl; |
246 | struct rhashtable_walker *walker; | 247 | struct rhashtable_walker *walker; |
247 | unsigned old_hash; | 248 | unsigned int old_hash; |
248 | 249 | ||
249 | new_tbl = rht_dereference(old_tbl->future_tbl, ht); | 250 | new_tbl = rht_dereference(old_tbl->future_tbl, ht); |
250 | if (!new_tbl) | 251 | if (!new_tbl) |
@@ -324,11 +325,12 @@ static int rhashtable_expand(struct rhashtable *ht)
324 | static int rhashtable_shrink(struct rhashtable *ht) | 325 | static int rhashtable_shrink(struct rhashtable *ht) |
325 | { | 326 | { |
326 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 327 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); |
327 | unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); | 328 | unsigned int size; |
328 | int err; | 329 | int err; |
329 | 330 | ||
330 | ASSERT_RHT_MUTEX(ht); | 331 | ASSERT_RHT_MUTEX(ht); |
331 | 332 | ||
333 | size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); | ||
332 | if (size < ht->p.min_size) | 334 | if (size < ht->p.min_size) |
333 | size = ht->p.min_size; | 335 | size = ht->p.min_size; |
334 | 336 | ||
@@ -379,9 +381,9 @@ unlock:
379 | 381 | ||
380 | static bool rhashtable_check_elasticity(struct rhashtable *ht, | 382 | static bool rhashtable_check_elasticity(struct rhashtable *ht, |
381 | struct bucket_table *tbl, | 383 | struct bucket_table *tbl, |
382 | unsigned hash) | 384 | unsigned int hash) |
383 | { | 385 | { |
384 | unsigned elasticity = ht->elasticity; | 386 | unsigned int elasticity = ht->elasticity; |
385 | struct rhash_head *head; | 387 | struct rhash_head *head; |
386 | 388 | ||
387 | rht_for_each(head, tbl, hash) | 389 | rht_for_each(head, tbl, hash) |
@@ -431,7 +433,7 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
431 | struct bucket_table *tbl) | 433 | struct bucket_table *tbl) |
432 | { | 434 | { |
433 | struct rhash_head *head; | 435 | struct rhash_head *head; |
434 | unsigned hash; | 436 | unsigned int hash; |
435 | int err; | 437 | int err; |
436 | 438 | ||
437 | tbl = rhashtable_last_table(ht, tbl); | 439 | tbl = rhashtable_last_table(ht, tbl); |