 include/linux/rhashtable.h |  35
 lib/rhashtable.c           | 163
 2 files changed, 198 insertions(+), 0 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e0337844358e..58851275fed9 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -18,6 +18,7 @@
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
+#include <linux/compiler.h>
 #include <linux/list_nulls.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
@@ -111,6 +112,7 @@ struct rhashtable_params {
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
+ * @walkers: List of active walkers
  * @being_destroyed: True if table is set up for destruction
  */
 struct rhashtable {
@@ -121,9 +123,36 @@ struct rhashtable {
 	struct rhashtable_params	p;
 	struct work_struct		run_work;
 	struct mutex			mutex;
+	struct list_head		walkers;
 	bool				being_destroyed;
 };
 
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @resize: Resize event occurred
+ */
+struct rhashtable_walker {
+	struct list_head list;
+	bool resize;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+	struct rhashtable *ht;
+	struct rhash_head *p;
+	struct rhashtable_walker *walker;
+	unsigned int slot;
+	unsigned int skip;
+};
+
 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
 {
 	return NULLS_MARKER(ht->p.nulls_base + hash);
@@ -179,6 +208,12 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 				      bool (*compare)(void *, void *),
 				      void *arg);
 
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+
 void rhashtable_destroy(struct rhashtable *ht);
 
 #define rht_dereference(p, ht) \
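
For readers skimming the new API surface, here is a minimal usage sketch (not part of the patch) of a single walk using the functions declared above. The element type struct test_obj, its rhash_head member name and the pr_info() output are hypothetical; the IS_ERR()/-EAGAIN handling anticipates the semantics documented in lib/rhashtable.c below.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/rhashtable.h>

/* Hypothetical element type; "node" is the rhash_head linkage the table
 * was initialised with (via rhashtable_params.head_offset).
 */
struct test_obj {
	int			id;
	struct rhash_head	node;
};

static void dump_all(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;

	if (rhashtable_walk_init(ht, &iter))	/* may sleep, allocates the walker */
		return;

	/* A -EAGAIN return only means the iterator was rewound by a resize;
	 * per the kerneldoc it may still be used immediately.
	 */
	rhashtable_walk_start(&iter);		/* takes the RCU read lock */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {		/* ERR_PTR(-EAGAIN): rewound */
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* keep walking from the start */
			break;
		}
		pr_info("id=%d\n", obj->id);
	}

	rhashtable_walk_stop(&iter);		/* drops the RCU read lock */
	rhashtable_walk_exit(&iter);		/* unlinks and frees the walker */
}
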
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 904b419b72f5..057919164e23 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -484,6 +484,7 @@ static void rht_deferred_worker(struct work_struct *work)
 {
 	struct rhashtable *ht;
 	struct bucket_table *tbl;
+	struct rhashtable_walker *walker;
 
 	ht = container_of(work, struct rhashtable, run_work);
 	mutex_lock(&ht->mutex);
@@ -492,6 +493,9 @@ static void rht_deferred_worker(struct work_struct *work)
 
 	tbl = rht_dereference(ht->tbl, ht);
 
+	list_for_each_entry(walker, &ht->walkers, list)
+		walker->resize = true;
+
 	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
 		rhashtable_expand(ht);
 	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
@@ -822,6 +826,164 @@ exit:
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
 
+/**
+ * rhashtable_walk_init - Initialise an iterator
+ * @ht: Table to walk over
+ * @iter: Hash table iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit if this function returns
+ * successfully.
+ */
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+	iter->ht = ht;
+	iter->p = NULL;
+	iter->slot = 0;
+	iter->skip = 0;
+
+	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+	if (!iter->walker)
+		return -ENOMEM;
+
+	mutex_lock(&ht->mutex);
+	list_add(&iter->walker->list, &ht->walkers);
+	mutex_unlock(&ht->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter: Hash table iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_init.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+	mutex_lock(&iter->ht->mutex);
+	list_del(&iter->walker->list);
+	mutex_unlock(&iter->ht->mutex);
+	kfree(iter->walker);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+
+/**
+ * rhashtable_walk_start - Start a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Start a hash table walk. Note that we take the RCU lock in all
+ * cases including when we return an error. So you must always call
+ * rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if a resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ */
+int rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+	rcu_read_lock();
+
+	if (iter->walker->resize) {
+		iter->slot = 0;
+		iter->skip = 0;
+		iter->walker->resize = false;
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if a resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	const struct bucket_table *tbl;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	void *obj = NULL;
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+
+	if (p) {
+		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+		goto next;
+	}
+
+	for (; iter->slot < tbl->size; iter->slot++) {
+		int skip = iter->skip;
+
+		rht_for_each_rcu(p, tbl, iter->slot) {
+			if (!skip)
+				break;
+			skip--;
+		}
+
+next:
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			obj = rht_obj(ht, p);
+			goto out;
+		}
+
+		iter->skip = 0;
+	}
+
+	iter->p = NULL;
+
+out:
+	if (iter->walker->resize) {
+		iter->p = NULL;
+		iter->slot = 0;
+		iter->skip = 0;
+		iter->walker->resize = false;
+		return ERR_PTR(-EAGAIN);
+	}
+
+	return obj;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_next);
+
+/**
+ * rhashtable_walk_stop - Finish a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Finish a hash table walk.
+ */
+void rhashtable_walk_stop(struct rhashtable_iter *iter)
+{
+	rcu_read_unlock();
+	iter->p = NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
+
 static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
@@ -894,6 +1056,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	memset(ht, 0, sizeof(*ht));
 	mutex_init(&ht->mutex);
 	memcpy(&ht->p, params, sizeof(*params));
+	INIT_LIST_HEAD(&ht->walkers);
 
 	if (params->locks_mul)
 		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
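
Because struct rhashtable_iter is meant to be parked inside a netlink dump callback, a second hedged sketch (not part of the patch) shows a resumable walk: one batch is emitted per call, the iterator stays initialised between calls, and a resize flagged by rht_deferred_worker simply rewinds it via -EAGAIN. The batch helper, its budget parameter, the emit() callback and struct test_obj are all hypothetical; as the rhashtable_walk_init kerneldoc warns, entries may be seen twice or missed across a stop/start boundary.

#include <linux/err.h>
#include <linux/rhashtable.h>

struct test_obj;			/* hypothetical element type, as in the earlier sketch */

/* Emit up to "budget" objects, then park the iterator so the caller can
 * resume on a later invocation (rhashtable_walk_init/exit are done once,
 * outside this helper). Returns the number of objects emitted.
 */
static int dump_batch(struct rhashtable_iter *iter, int budget,
		      void (*emit)(struct test_obj *obj))
{
	struct test_obj *obj;
	int count = 0;

	rhashtable_walk_start(iter);	/* -EAGAIN here just means "rewound" */

	while (count < budget &&
	       (obj = rhashtable_walk_next(iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize: iterator rewound */
			break;
		}
		emit(obj);
		count++;
	}

	rhashtable_walk_stop(iter);	/* drops RCU between batches */
	return count;
}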
