author    Jiri Kosina <jkosina@suse.cz>  2017-05-02 05:02:41 -0400
committer Jiri Kosina <jkosina@suse.cz>  2017-05-02 05:02:41 -0400
commit    4d6ca227c768b50b05cf183974b40abe444e9d0c (patch)
tree      bf953d8e895281053548b9967a2c4b58d641df00 /lib/rhashtable.c
parent    800f3eef8ebc1264e9c135bfa892c8ae41fa4792 (diff)
parent    af22a610bc38508d5ea760507d31be6b6983dfa8 (diff)
Merge branch 'for-4.12/asus' into for-linus
Diffstat (limited to 'lib/rhashtable.c')
 -rw-r--r--  lib/rhashtable.c  274
 1 file changed, 222 insertions(+), 52 deletions(-)
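Note on the lib/rhashtable.c part of this merge: the diff below introduces a "nested" bucket table built from page-sized arrays of pointers, used when the flat bucket array cannot be allocated (the gfp != GFP_KERNEL fallback in bucket_table_alloc() below). The following standalone userspace sketch (an illustration only, not kernel code; it assumes 4 KiB pages and 8-byte pointers) reproduces the sizing arithmetic the patch uses: each nested level fans out by PAGE_SIZE / sizeof(void *) slots, and the top level consumes nest = (ilog2(nbuckets) - 1) % shift + 1 bits of the hash.

/*
 * Standalone illustration (not kernel code) of the nested-table sizing
 * used by nested_bucket_table_alloc() in the diff below.  Assumptions:
 * 4 KiB pages and 8-byte pointers, so each level of the tree is one
 * page holding 1 << shift pointers.
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed page size: 4 KiB */

static unsigned int ilog2(unsigned int n)
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int nbuckets;

	for (nbuckets = 1U << 10; nbuckets <= 1U << 20; nbuckets <<= 5) {
		/* same formula as nested_bucket_table_alloc() */
		unsigned int nest = (ilog2(nbuckets) - 1) % shift + 1;
		/* top level plus however many full page-sized levels remain */
		unsigned int levels = 1 + (ilog2(nbuckets) - nest) / shift;

		printf("nbuckets=%8u shift=%u nest=%u levels=%u\n",
		       nbuckets, shift, nest, levels);
	}
	return 0;
}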
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 32d0ad058380..f8635fd57442 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/log2.h>
 #include <linux/sched.h>
+#include <linux/rculist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -32,6 +33,11 @@
 #define HASH_MIN_SIZE		4U
 #define BUCKET_LOCKS_PER_CPU	32UL
 
+union nested_table {
+	union nested_table __rcu *table;
+	struct rhash_head __rcu *bucket;
+};
+
 static u32 head_hashfn(struct rhashtable *ht,
 		       const struct bucket_table *tbl,
 		       const struct rhash_head *he)
@@ -76,6 +82,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 	/* Never allocate more than 0.5 locks per bucket */
 	size = min_t(unsigned int, size, tbl->size >> 1);
 
+	if (tbl->nest)
+		size = min(size, 1U << tbl->nest);
+
 	if (sizeof(spinlock_t) != 0) {
 		tbl->locks = NULL;
 #ifdef CONFIG_NUMA
@@ -99,11 +108,46 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
 	return 0;
 }
 
+static void nested_table_free(union nested_table *ntbl, unsigned int size)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	const unsigned int len = 1 << shift;
+	unsigned int i;
+
+	ntbl = rcu_dereference_raw(ntbl->table);
+	if (!ntbl)
+		return;
+
+	if (size > len) {
+		size >>= shift;
+		for (i = 0; i < len; i++)
+			nested_table_free(ntbl + i, size);
+	}
+
+	kfree(ntbl);
+}
+
+static void nested_bucket_table_free(const struct bucket_table *tbl)
+{
+	unsigned int size = tbl->size >> tbl->nest;
+	unsigned int len = 1 << tbl->nest;
+	union nested_table *ntbl;
+	unsigned int i;
+
+	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+
+	for (i = 0; i < len; i++)
+		nested_table_free(ntbl + i, size);
+
+	kfree(ntbl);
+}
+
 static void bucket_table_free(const struct bucket_table *tbl)
 {
-	if (tbl)
-		kvfree(tbl->locks);
+	if (tbl->nest)
+		nested_bucket_table_free(tbl);
 
+	kvfree(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -112,6 +156,59 @@ static void bucket_table_free_rcu(struct rcu_head *head)
 	bucket_table_free(container_of(head, struct bucket_table, rcu));
 }
 
+static union nested_table *nested_table_alloc(struct rhashtable *ht,
+					      union nested_table __rcu **prev,
+					      unsigned int shifted,
+					      unsigned int nhash)
+{
+	union nested_table *ntbl;
+	int i;
+
+	ntbl = rcu_dereference(*prev);
+	if (ntbl)
+		return ntbl;
+
+	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+
+	if (ntbl && shifted) {
+		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
+					    (i << shifted) | nhash);
+	}
+
+	rcu_assign_pointer(*prev, ntbl);
+
+	return ntbl;
+}
+
+static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
+						      size_t nbuckets,
+						      gfp_t gfp)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	struct bucket_table *tbl;
+	size_t size;
+
+	if (nbuckets < (1 << (shift + 1)))
+		return NULL;
+
+	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
+
+	tbl = kzalloc(size, gfp);
+	if (!tbl)
+		return NULL;
+
+	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
+				0, 0)) {
+		kfree(tbl);
+		return NULL;
+	}
+
+	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
+
+	return tbl;
+}
+
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       size_t nbuckets,
 					       gfp_t gfp)
@@ -126,10 +223,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
 	if (tbl == NULL && gfp == GFP_KERNEL)
 		tbl = vzalloc(size);
+
+	size = nbuckets;
+
+	if (tbl == NULL && gfp != GFP_KERNEL) {
+		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
+		nbuckets = 0;
+	}
 	if (tbl == NULL)
 		return NULL;
 
-	tbl->size = nbuckets;
+	tbl->size = size;
 
 	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
 		bucket_table_free(tbl);
@@ -164,12 +268,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
-	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
-	int err = -ENOENT;
+	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
+	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
 	spinlock_t *new_bucket_lock;
 	unsigned int new_hash;
 
+	if (new_tbl->nest)
+		goto out;
+
+	err = -ENOENT;
+
 	rht_for_each(entry, old_tbl, old_hash) {
 		err = 0;
 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -202,19 +311,26 @@ out:
 	return err;
 }
 
-static void rhashtable_rehash_chain(struct rhashtable *ht,
-				    unsigned int old_hash)
+static int rhashtable_rehash_chain(struct rhashtable *ht,
+				   unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	spinlock_t *old_bucket_lock;
+	int err;
 
 	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
 
 	spin_lock_bh(old_bucket_lock);
-	while (!rhashtable_rehash_one(ht, old_hash))
+	while (!(err = rhashtable_rehash_one(ht, old_hash)))
 		;
-	old_tbl->rehash++;
+
+	if (err == -ENOENT) {
+		old_tbl->rehash++;
+		err = 0;
+	}
 	spin_unlock_bh(old_bucket_lock);
+
+	return err;
 }
 
 static int rhashtable_rehash_attach(struct rhashtable *ht,
@@ -246,13 +362,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	struct bucket_table *new_tbl;
 	struct rhashtable_walker *walker;
 	unsigned int old_hash;
+	int err;
 
 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
 	if (!new_tbl)
 		return 0;
 
-	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
-		rhashtable_rehash_chain(ht, old_hash);
+	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+		err = rhashtable_rehash_chain(ht, old_hash);
+		if (err)
+			return err;
+	}
 
 	/* Publish the new table pointer. */
 	rcu_assign_pointer(ht->tbl, new_tbl);
@@ -271,31 +391,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
 
-/**
- * rhashtable_expand - Expand hash table while allowing concurrent lookups
- * @ht:		the hash table to expand
- *
- * A secondary bucket array is allocated and the hash entries are migrated.
- *
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
- *
- * The caller must ensure that no concurrent resizing occurs by holding
- * ht->mutex.
- *
- * It is valid to have concurrent insertions and deletions protected by per
- * bucket locks or concurrent RCU protected lookups and traversals.
- */
-static int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_rehash_alloc(struct rhashtable *ht,
+				   struct bucket_table *old_tbl,
+				   unsigned int size)
 {
-	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+	struct bucket_table *new_tbl;
 	int err;
 
 	ASSERT_RHT_MUTEX(ht);
 
-	old_tbl = rhashtable_last_table(ht, old_tbl);
-
-	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
+	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
 	if (new_tbl == NULL)
 		return -ENOMEM;
 
@@ -324,12 +429,9 @@ static int rhashtable_expand(struct rhashtable *ht)
  */
 static int rhashtable_shrink(struct rhashtable *ht)
 {
-	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	unsigned int nelems = atomic_read(&ht->nelems);
 	unsigned int size = 0;
-	int err;
-
-	ASSERT_RHT_MUTEX(ht);
 
 	if (nelems)
 		size = roundup_pow_of_two(nelems * 3 / 2);
@@ -342,15 +444,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
 	if (rht_dereference(old_tbl->future_tbl, ht))
 		return -EEXIST;
 
-	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
-	if (new_tbl == NULL)
-		return -ENOMEM;
-
-	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
-	if (err)
-		bucket_table_free(new_tbl);
-
-	return err;
+	return rhashtable_rehash_alloc(ht, old_tbl, size);
 }
 
 static void rht_deferred_worker(struct work_struct *work)
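Aside (a standalone arithmetic check, not kernel code): the shrink path above targets size = roundup_pow_of_two(nelems * 3 / 2), i.e. the smallest power of two that is at least 1.5x the current element count, and now hands that size to rhashtable_rehash_alloc().

/*
 * Standalone check (not kernel code) of the shrink target computed in
 * rhashtable_shrink() above: size = roundup_pow_of_two(nelems * 3 / 2).
 * The kernel only does this when nelems is non-zero.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	const unsigned int nelems[] = { 5, 100, 3000, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(nelems) / sizeof(nelems[0]); i++)
		printf("nelems=%6u -> target size=%u\n", nelems[i],
		       roundup_pow_of_two(nelems[i] * 3 / 2));
	return 0;
}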
@@ -366,11 +460,14 @@ static void rht_deferred_worker(struct work_struct *work)
 	tbl = rhashtable_last_table(ht, tbl);
 
 	if (rht_grow_above_75(ht, tbl))
-		rhashtable_expand(ht);
+		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
 	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
-		rhashtable_shrink(ht);
+		err = rhashtable_shrink(ht);
+	else if (tbl->nest)
+		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-	err = rhashtable_rehash_table(ht);
+	if (!err)
+		err = rhashtable_rehash_table(ht);
 
 	mutex_unlock(&ht->mutex);
 
@@ -439,8 +536,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	int elasticity;
 
 	elasticity = ht->elasticity;
-	pprev = &tbl->buckets[hash];
-	rht_for_each(head, tbl, hash) {
+	pprev = rht_bucket_var(tbl, hash);
+	rht_for_each_continue(head, *pprev, tbl, hash) {
 		struct rhlist_head *list;
 		struct rhlist_head *plist;
 
@@ -477,6 +574,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 						  struct rhash_head *obj,
 						  void *data)
 {
+	struct rhash_head __rcu **pprev;
 	struct bucket_table *new_tbl;
 	struct rhash_head *head;
 
@@ -499,7 +597,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		return ERR_PTR(-EAGAIN);
 
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+	pprev = rht_bucket_insert(ht, tbl, hash);
+	if (!pprev)
+		return ERR_PTR(-ENOMEM);
+
+	head = rht_dereference_bucket(*pprev, tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
 	if (ht->rhlist) {
@@ -509,7 +611,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 		RCU_INIT_POINTER(list->next, NULL);
 	}
 
-	rcu_assign_pointer(tbl->buckets[hash], obj);
+	rcu_assign_pointer(*pprev, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
@@ -975,7 +1077,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void (*free_fn)(void *ptr, void *arg),
 				 void *arg)
 {
-	const struct bucket_table *tbl;
+	struct bucket_table *tbl;
 	unsigned int i;
 
 	cancel_work_sync(&ht->run_work);
@@ -986,7 +1088,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
 	for (i = 0; i < tbl->size; i++) {
 		struct rhash_head *pos, *next;
 
-		for (pos = rht_dereference(tbl->buckets[i], ht),
+		for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
 		     next = !rht_is_a_nulls(pos) ?
 			       rht_dereference(pos->next, ht) : NULL;
 		     !rht_is_a_nulls(pos);
@@ -1007,3 +1109,71 @@ void rhashtable_destroy(struct rhashtable *ht)
 	return rhashtable_free_and_destroy(ht, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+					    unsigned int hash)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	static struct rhash_head __rcu *rhnull =
+		(struct rhash_head __rcu *)NULLS_MARKER(0);
+	unsigned int index = hash & ((1 << tbl->nest) - 1);
+	unsigned int size = tbl->size >> tbl->nest;
+	unsigned int subhash = hash;
+	union nested_table *ntbl;
+
+	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
+	subhash >>= tbl->nest;
+
+	while (ntbl && size > (1 << shift)) {
+		index = subhash & ((1 << shift) - 1);
+		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
+						  tbl, hash);
+		size >>= shift;
+		subhash >>= shift;
+	}
+
+	if (!ntbl)
+		return &rhnull;
+
+	return &ntbl[subhash].bucket;
+
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested);
+
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+						   struct bucket_table *tbl,
+						   unsigned int hash)
+{
+	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+	unsigned int index = hash & ((1 << tbl->nest) - 1);
+	unsigned int size = tbl->size >> tbl->nest;
+	union nested_table *ntbl;
+	unsigned int shifted;
+	unsigned int nhash;
+
+	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	hash >>= tbl->nest;
+	nhash = index;
+	shifted = tbl->nest;
+	ntbl = nested_table_alloc(ht, &ntbl[index].table,
+				  size <= (1 << shift) ? shifted : 0, nhash);
+
+	while (ntbl && size > (1 << shift)) {
+		index = hash & ((1 << shift) - 1);
+		size >>= shift;
+		hash >>= shift;
+		nhash |= index << shifted;
+		shifted += shift;
+		ntbl = nested_table_alloc(ht, &ntbl[index].table,
+					  size <= (1 << shift) ? shifted : 0,
+					  nhash);
+	}
+
+	if (!ntbl)
+		return NULL;
+
+	return &ntbl[hash].bucket;
+
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
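For reference, the following standalone sketch (userspace only, not kernel code; it assumes 4 KiB pages and 8-byte pointers and builds no real table) mirrors the index arithmetic rht_bucket_nested() performs above: the low tbl->nest bits of the already table-sized hash pick a top-level slot, each further level consumes another shift = PAGE_SHIFT - ilog2(sizeof(void *)) bits, and whatever remains indexes the final page of buckets.

/*
 * Userspace sketch (not kernel code) of the per-level indices that
 * rht_bucket_nested() derives from a hash.  Assumptions: 4 KiB pages,
 * 8-byte pointers; the hash is already reduced modulo tbl->size.
 */
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed page size: 4 KiB */

static unsigned int ilog2(unsigned int n)
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

static void walk(unsigned int tbl_size, unsigned int nest, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int size = tbl_size >> nest;
	unsigned int index = hash & ((1 << nest) - 1);
	unsigned int subhash = hash >> nest;

	printf("hash %#07x: top slot %u", hash, index);
	while (size > (1U << shift)) {
		index = subhash & ((1 << shift) - 1);
		printf(" -> slot %u", index);
		size >>= shift;
		subhash >>= shift;
	}
	printf(" -> bucket %u\n", subhash);
}

int main(void)
{
	/* a 2^20-bucket table: nest = (20 - 1) % 9 + 1 = 2, three levels */
	walk(1U << 20, 2, 0x12345);
	walk(1U << 20, 2, 0xfffff);
	return 0;
}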
