| author | Olof Johansson <olof@lixom.net> | 2015-04-03 16:22:39 -0400 |
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2015-04-03 16:22:39 -0400 |
| commit | ee327179b9f5f9c0259f43493a5a7e96854094de (patch) | |
| tree | bb45459f621a67218cd5fd580cc19b724b5bf777 /lib/rhashtable.c | |
| parent | 6054ef25e20219a604429c1437bc601f8ead87a4 (diff) | |
| parent | 83c3a7d4ac7fdc29a64bf9a5467a36b4c72a1eed (diff) | |
Merge tag 'omap-for-v4.1/wl12xx-dt' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/dt
Merge "wireless wl12xx and omap device tree changes for v4.1" from Tony
Lindgren:
Wireless and omap changes to make the wl12xx driver use device tree
data instead of platform data, from Eliad Peller <eliad@wizery.com>:
- Add device-tree support to the wlcore (wl12xx/wl18xx) driver.
- Update the current users to use the bindings instead of pdata-quirks.
- Finally, remove the deprecated wl12xx_platform_data struct (an
  illustrative probe sketch follows this list).
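For orientation, the driver-side shape of this conversion is roughly the
following: clock data that used to arrive through wl12xx_platform_data is
instead read from the device tree node as a plain frequency. This is a
minimal sketch, not the actual wlcore code; of_property_read_u32() is the
kernel's OF API and "ref-clock-frequency" is the property from the binding
added by this series, while the helper name and surrounding structure are
hypothetical:

```c
#include <linux/device.h>
#include <linux/of.h>

/*
 * Illustrative only: read the reference clock rate as a raw frequency
 * in Hz (replacing the old platform-data clock enumerations).  Only
 * of_property_read_u32() and the "ref-clock-frequency" property name
 * are taken from the kernel/binding; the rest is a sketch.
 */
static int example_wlcore_parse_dt(struct device *dev, u32 *ref_clock_hz)
{
	struct device_node *np = dev->of_node;

	if (!np)
		return -ENODEV;	/* no DT node: nothing to parse */

	return of_property_read_u32(np, "ref-clock-frequency", ref_clock_hz);
}
```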
Note that the da850 board file code still uses the platform data,
but we have da850.dtsi that can be used instead. So it was decided
that we should try to remove the wl12xx support from the da850
board file, as suggested by Sekhar Nori <nsekhar@ti.com>. As it's
the last patch in the series, it can simply be reverted if needed.
As this series touches quite a bit of arch code, it was suggested
by Kalle Valo <kvalo@codeaurora.org> that the whole series should
be merged via the arm-soc tree.
* tag 'omap-for-v4.1/wl12xx-dt' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
wlcore: remove wl12xx_platform_data
ARM: dts: add wl12xx/wl18xx bindings
wlcore: add device-tree support
dt: bindings: add TI's wilink wireless device
wl12xx: use frequency instead of enumerations for pdata clocks
wlcore: set irq_trigger in board files instead of hiding behind a quirk
+ Linux 4.0-rc4
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'lib/rhashtable.c')
| -rw-r--r-- | lib/rhashtable.c | 62 |
1 file changed, 28 insertions, 34 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a90d00..b5344ef4c684 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       size_t nbuckets)
 {
-	struct bucket_table *tbl;
+	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
-
 	if (tbl == NULL)
 		return NULL;
 
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
 	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
 			 struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
 			}
 		}
 		unlock_buckets(new_tbl, old_tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
 				complete = false;
 
 			unlock_buckets(new_tbl, old_tbl, old_hash);
+			cond_resched();
 		}
 	}
 
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 				   tbl->buckets[new_hash + new_tbl->size]);
 
 		unlock_buckets(new_tbl, tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl->size))
 		rhashtable_expand(ht);
-	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl->size))
 		rhashtable_shrink(ht);
-
 unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
-{
-	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	size_t size = tbl->size;
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl &&
-	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
-		schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				struct bucket_table *tbl, u32 hash)
+				struct bucket_table *tbl,
+				const struct bucket_table *old_tbl, u32 hash)
 {
+	bool no_resize_running = tbl == old_tbl;
 	struct rhash_head *head;
 
 	hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
+	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+		schedule_work(&ht->run_work);
 }
 
 /**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(tbl, old_tbl, hash);
-	__rhashtable_insert(ht, obj, tbl, hash);
+	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
 	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
+		bool no_resize_running = new_tbl == old_tbl;
+
 		atomic_dec(&ht->nelems);
-		rhashtable_wakeup_worker(ht);
+		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+			schedule_work(&ht->run_work);
 	}
 
 	rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 		goto exit;
 	}
 
-	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
 	if (!iter->walker)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&iter->walker->list);
+	iter->walker->resize = false;
+
 	mutex_lock(&ht->mutex);
 	list_add(&iter->walker->list, &ht->walkers);
 	mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (!ht->p.hash_rnd)
 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		INIT_WORK(&ht->run_work, rht_deferred_worker);
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
 
 	return 0;
 }
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
 	ht->being_destroyed = true;
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		cancel_work_sync(&ht->run_work);
+	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
 	bucket_table_free(rht_dereference(ht->tbl, ht));
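The bucket_table_alloc() change above is an instance of a common kernel
allocation pattern: attempt a physically contiguous allocation only while
it is cheap, then fall back to vmalloc space for large tables. A minimal,
self-contained sketch of that pattern follows; the function name is
hypothetical, while kzalloc(), vzalloc(), kvfree(), and the GFP flags are
real kernel API:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch of the pattern used by bucket_table_alloc() above: for sizes
 * up to the "costly" page order, try kzalloc() without retrying hard
 * or warning on failure; otherwise, or if that attempt fails, fall
 * back to vzalloc(), which only needs virtually contiguous pages.
 */
static void *example_table_alloc(size_t size)
{
	void *p = NULL;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!p)
		p = vzalloc(size);	/* zeroed, like kzalloc() */

	return p;			/* release with kvfree() */
}
```

The __GFP_NORETRY flag keeps a large contiguous attempt from trying too
hard before the vzalloc() fallback gets its chance, and the cond_resched()
calls added to the resize loops above serve the same latency goal by
yielding the CPU between buckets during long rehashes.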
