 include/linux/rhashtable.h |  6
 lib/rhashtable.c           | 55
 2 files changed, 28 insertions(+), 33 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5ef8ea551556..c93ff8ac474a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -50,6 +50,7 @@ struct rhash_head {
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
  * @hash_rnd: Random seed to fold into hash
+ * @shift: Current size (1 << shift)
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @buckets: size * hash buckets
@@ -57,6 +58,7 @@ struct rhash_head {
 struct bucket_table {
 	size_t size;
 	u32 hash_rnd;
+	u32 shift;
 	unsigned int locks_mask;
 	spinlock_t *locks;
 
@@ -99,7 +101,6 @@ struct rhashtable_params {
  * @tbl: Bucket table
  * @future_tbl: Table under construction during expansion/shrinking
  * @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
@@ -110,12 +111,11 @@ struct rhashtable {
 	struct bucket_table __rcu *tbl;
 	struct bucket_table __rcu *future_tbl;
 	atomic_t nelems;
-	atomic_t shift;
+	bool being_destroyed;
 	struct rhashtable_params p;
 	struct work_struct run_work;
 	struct mutex mutex;
 	struct list_head walkers;
-	bool being_destroyed;
 };
 
 /**
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 68210cc2bab8..adea791ea3ab 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -147,7 +147,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 }
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-					       size_t nbuckets)
+					       size_t nbuckets, u32 hash_rnd)
 {
 	struct bucket_table *tbl = NULL;
 	size_t size;
@@ -162,6 +162,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 		return NULL;
 
 	tbl->size = nbuckets;
+	tbl->shift = ilog2(nbuckets);
+	tbl->hash_rnd = hash_rnd;
 
 	if (alloc_bucket_locks(ht, tbl) < 0) {
 		bucket_table_free(tbl);
@@ -177,25 +179,27 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 /**
  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
  * @ht: hash table
- * @new_size: new table size
+ * @tbl: current table
  */
-static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht,
+			      const struct bucket_table *tbl)
 {
 	/* Expand table when exceeding 75% load */
-	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
+	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+	       (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
 }
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht: hash table
- * @new_size: new table size
+ * @tbl: current table
  */
-static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht,
+				const struct bucket_table *tbl)
 {
 	/* Shrink table beneath 30% load */
-	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
-	       (atomic_read(&ht->shift) > ht->p.min_shift);
+	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+	       tbl->shift > ht->p.min_shift;
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
@@ -310,16 +314,11 @@ int rhashtable_expand(struct rhashtable *ht)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, old_tbl->hash_rnd);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
-	new_tbl->hash_rnd = old_tbl->hash_rnd;
-
-	atomic_inc(&ht->shift);
-
 	rhashtable_rehash(ht, new_tbl);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_expand);
@@ -342,20 +341,15 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  */
 int rhashtable_shrink(struct rhashtable *ht)
 {
-	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 
 	ASSERT_RHT_MUTEX(ht);
 
-	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+	new_tbl = bucket_table_alloc(ht, old_tbl->size / 2, old_tbl->hash_rnd);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
-	new_tbl->hash_rnd = tbl->hash_rnd;
-
-	atomic_dec(&ht->shift);
-
 	rhashtable_rehash(ht, new_tbl);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_shrink);
@@ -376,9 +370,9 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (rht_grow_above_75(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl))
 		rhashtable_expand(ht);
-	else if (rht_shrink_below_30(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl))
 		rhashtable_shrink(ht);
 unlock:
 	mutex_unlock(&ht->mutex);
@@ -431,7 +425,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+	if (no_resize_running && rht_grow_above_75(ht, tbl))
 		schedule_work(&ht->run_work);
 
 exit:
@@ -539,7 +533,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
 		bool no_resize_running = tbl == old_tbl;
 
 		atomic_dec(&ht->nelems);
-		if (no_resize_running && rht_shrink_below_30(ht, tbl->size))
+		if (no_resize_running && rht_shrink_below_30(ht, tbl))
 			schedule_work(&ht->run_work);
 	}
 
@@ -913,6 +907,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 {
 	struct bucket_table *tbl;
 	size_t size;
+	u32 hash_rnd;
 
 	size = HASH_DEFAULT_SIZE;
 
@@ -939,14 +934,14 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	else
 		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
 
-	tbl = bucket_table_alloc(ht, size);
+	get_random_bytes(&hash_rnd, sizeof(hash_rnd));
+
+	tbl = bucket_table_alloc(ht, size, hash_rnd);
 	if (tbl == NULL)
 		return -ENOMEM;
 
-	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-
 	atomic_set(&ht->nelems, 0);
-	atomic_set(&ht->shift, ilog2(tbl->size));
+
 	RCU_INIT_POINTER(ht->tbl, tbl);
 	RCU_INIT_POINTER(ht->future_tbl, tbl);
 
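
For readers skimming the resize policy this patch touches, the sketch below is a minimal, self-contained C approximation of the 75% grow / 30% shrink checks as they read after the change, with the shift bound now taken from the bucket table rather than from the removed ht->shift atomic. It is an illustration only, not kernel code: plain integers stand in for atomic_t, the flattened min_shift/max_shift fields and the sample values in main() are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the structures this patch touches. */
struct bucket_table {
	size_t size;            /* number of hash buckets */
	unsigned int shift;     /* ilog2(size), now stored per table */
};

struct rhashtable {
	unsigned int nelems;    /* atomic_t in the kernel */
	unsigned int min_shift; /* models ht->p.min_shift */
	unsigned int max_shift; /* models ht->p.max_shift, 0 = unbounded */
};

/* Expand when load exceeds 75% and the table is still allowed to grow. */
static bool rht_grow_above_75(const struct rhashtable *ht,
                              const struct bucket_table *tbl)
{
	return ht->nelems > (tbl->size / 4 * 3) &&
	       (!ht->max_shift || tbl->shift < ht->max_shift);
}

/* Shrink when load drops below 30% and the table is still above min_shift. */
static bool rht_shrink_below_30(const struct rhashtable *ht,
                                const struct bucket_table *tbl)
{
	return ht->nelems < (tbl->size * 3 / 10) &&
	       tbl->shift > ht->min_shift;
}

int main(void)
{
	struct bucket_table tbl = { .size = 64, .shift = 6 };
	struct rhashtable ht = { .nelems = 50, .min_shift = 4, .max_shift = 0 };

	/* 50 > 48 (75% of 64) and max_shift unbounded -> grow */
	printf("grow?   %d\n", rht_grow_above_75(&ht, &tbl));
	/* 50 is not < 19 (30% of 64) -> no shrink */
	printf("shrink? %d\n", rht_shrink_below_30(&ht, &tbl));
	return 0;
}

The point of the sketch is only that both thresholds now consult tbl->size and tbl->shift directly, which is why rht_deferred_worker, __rhashtable_insert and rhashtable_remove pass the table pointer instead of tbl->size in the hunks above.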