Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  163
1 file changed, 163 insertions, 0 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 904b419b72f5..057919164e23 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -484,6 +484,7 @@ static void rht_deferred_worker(struct work_struct *work)
 {
 	struct rhashtable *ht;
 	struct bucket_table *tbl;
+	struct rhashtable_walker *walker;
 
 	ht = container_of(work, struct rhashtable, run_work);
 	mutex_lock(&ht->mutex);
@@ -492,6 +493,9 @@ static void rht_deferred_worker(struct work_struct *work)
 
 	tbl = rht_dereference(ht->tbl, ht);
 
+	list_for_each_entry(walker, &ht->walkers, list)
+		walker->resize = true;
+
 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
 		rhashtable_expand(ht);
 	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
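Neither struct rhashtable_walker nor struct rhashtable_iter (used by the new walk API below) appears in this diff, which the diffstat limits to lib/rhashtable.c; they presumably come from the matching header change. Inferred from how their fields are used in this patch, they look roughly like the sketch below, not the exact definitions:

/* Sketch only: inferred from usage in this patch, not copied from the header. */
struct rhashtable_walker {
	struct list_head list;		/* chained on ht->walkers under ht->mutex */
	bool resize;			/* set by rht_deferred_worker on expand/shrink */
};

struct rhashtable_iter {
	struct rhashtable *ht;		/* table being walked */
	struct rhash_head *p;		/* current entry, NULL between walks */
	struct rhashtable_walker *walker;
	unsigned int slot;		/* current bucket index */
	unsigned int skip;		/* entries already returned in this bucket */
};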
@@ -822,6 +826,164 @@ exit:
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
 
+/**
+ * rhashtable_walk_init - Initialise an iterator
+ * @ht: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice.  Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit if this function returns
+ * successfully.
+ */
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+	iter->ht = ht;
+	iter->p = NULL;
+	iter->slot = 0;
+	iter->skip = 0;
+
+	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+	if (!iter->walker)
+		return -ENOMEM;
+
+	mutex_lock(&ht->mutex);
+	list_add(&iter->walker->list, &ht->walkers);
+	mutex_unlock(&ht->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter: Hash table Iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_init.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+	mutex_lock(&iter->ht->mutex);
+	list_del(&iter->walker->list);
+	mutex_unlock(&iter->ht->mutex);
+	kfree(iter->walker);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
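Taken together, rhashtable_walk_init() and rhashtable_walk_exit() bracket an iterator's lifetime: init allocates the walker with GFP_KERNEL and registers it on ht->walkers under ht->mutex, exit unregisters and frees it. A minimal caller might look like the sketch below; dump_table() is illustrative, not part of this patch, and the walk loop itself is sketched after rhashtable_walk_next() further down.

/* Hypothetical caller, shown only to illustrate the init/exit pairing. */
static int dump_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	int err;

	/* May sleep: kmalloc(GFP_KERNEL) plus ht->mutex, so process context only. */
	err = rhashtable_walk_init(ht, &iter);
	if (err)
		return err;

	/* ... rhashtable_walk_start()/next()/stop() loop, see the later sketch ... */

	/* Every successful rhashtable_walk_init() needs a matching exit. */
	rhashtable_walk_exit(&iter);
	return err;
}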
+
+/**
+ * rhashtable_walk_start - Start a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Start a hash table walk.  Note that we take the RCU lock in all
+ * cases including when we return an error.  So you must always call
+ * rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if a resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ */
+int rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+	rcu_read_lock();
+
+	if (iter->walker->resize) {
+		iter->slot = 0;
+		iter->skip = 0;
+		iter->walker->resize = false;
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if a resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	const struct bucket_table *tbl;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	void *obj = NULL;
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+
+	if (p) {
+		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+		goto next;
+	}
+
+	for (; iter->slot < tbl->size; iter->slot++) {
+		int skip = iter->skip;
+
+		rht_for_each_rcu(p, tbl, iter->slot) {
+			if (!skip)
+				break;
+			skip--;
+		}
+
+next:
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			obj = rht_obj(ht, p);
+			goto out;
+		}
+
+		iter->skip = 0;
+	}
+
+	iter->p = NULL;
+
+out:
+	if (iter->walker->resize) {
+		iter->p = NULL;
+		iter->slot = 0;
+		iter->skip = 0;
+		iter->walker->resize = false;
+		return ERR_PTR(-EAGAIN);
+	}
+
+	return obj;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_next);
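Continuing the dump_table() sketch above, a complete walk ties the three primitives together: start (which takes rcu_read_lock() even on error), a next loop that distinguishes NULL (end of table) from ERR_PTR(-EAGAIN) (resize; iterator rewound), and a final stop. Everything below is illustrative, not part of the patch; a real caller would use its own object type recovered via the rht_head it embedded.

	void *obj;				/* caller would normally use its own type */

	err = rhashtable_walk_start(&iter);	/* RCU is held even on error */
	if (err == -EAGAIN)
		err = 0;			/* iterator already rewound; keep going */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resized mid-walk; iterator rewound */
			err = PTR_ERR(obj);	/* defensive; no other errors today */
			break;
		}
		pr_info("rhashtable walk: visited %p\n", obj);	/* placeholder work */
	}

	rhashtable_walk_stop(&iter);		/* drops rcu_read_lock() */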
+
+/**
+ * rhashtable_walk_stop - Finish a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Finish a hash table walk.
+ */
+void rhashtable_walk_stop(struct rhashtable_iter *iter)
+{
+	rcu_read_unlock();
+	iter->p = NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
@@ -894,6 +1056,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
 	memcpy(&ht->p, params, sizeof(*params));
+	INIT_LIST_HEAD(&ht->walkers);
 
 	if (params->locks_mul)
 		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
