path: root/include/linux/rhashtable.h
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 13:11:24 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-05 13:11:24 -0400
commit    687ee0ad4e897e29f4b41f7a20c866d74c5e0660 (patch)
tree      b31a2af35c24a54823674cdd126993b80daeac67 /include/linux/rhashtable.h
parent    3ddf40e8c31964b744ff10abb48c8e36a83ec6e7 (diff)
parent    03a1eabc3f54469abd4f1784182851b2e29630cc (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BBR TCP congestion control, from Neal Cardwell, Yuchung Cheng and co. at Google. https://lwn.net/Articles/701165/

 2) Do TCP Small Queues for retransmits, from Eric Dumazet.

 3) Support collect_md mode for all IPV4 and IPV6 tunnels, from Alexei Starovoitov.

 4) Allow cls_flower to classify packets in ip tunnels, from Amir Vadai.

 5) Support DSA tagging in older mv88e6xxx switches, from Andrew Lunn.

 6) Support GMAC protocol in iwlwifi mwm, from Ayala Beker.

 7) Support ndo_poll_controller in mlx5, from Calvin Owens.

 8) Move VRF processing to an output hook and allow l3mdev to be loopback, from David Ahern.

 9) Support SOCK_DESTROY for UDP sockets. Also from David Ahern.

10) Congestion control in RXRPC, from David Howells.

11) Support geneve RX offload in ixgbe, from Emil Tantilov.

12) When hitting pressure for new incoming TCP data SKBs, perform a partial rather than a full purge of the OFO queue (which could be huge). From Eric Dumazet.

13) Convert XFRM state and policy lookups to RCU, from Florian Westphal.

14) Support RX network flow classification to igb, from Gangfeng Huang.

15) Hardware offloading of eBPF in nfp driver, from Jakub Kicinski.

16) New skbmod packet action, from Jamal Hadi Salim.

17) Remove some inefficiencies in snmp proc output, from Jia He.

18) Add FIB notifications to properly propagate route changes to hardware which is doing forwarding offloading. From Jiri Pirko.

19) New dsa driver for qca8xxx chips, from John Crispin.

20) Implement RFC7559 ipv6 router solicitation backoff, from Maciej Żenczykowski.

21) Add L3 mode to ipvlan, from Mahesh Bandewar.

22) Support 802.1ad in mlx4, from Moshe Shemesh.

23) Support hardware LRO in mediatek driver, from Nelson Chang.

24) Add TC offloading to mlx5, from Or Gerlitz.

25) Convert various drivers to ethtool ksettings interfaces, from Philippe Reynes.

26) TX max rate limiting for cxgb4, from Rahul Lakkireddy.

27) NAPI support for ath10k, from Rajkumar Manoharan.

28) Support XDP in mlx5, from Rana Shahout and Saeed Mahameed.

29) UDP replicast support in TIPC, from Richard Alpe.

30) Per-queue statistics for qed driver, from Sudarsana Reddy Kalluru.

31) Support BQL in thunderx driver, from Sunil Goutham.

32) TSO support in alx driver, from Tobias Regnery.

33) Add stream parser engine and use it in kcm.

34) Support async DHCP replies in ipconfig module, from Uwe Kleine-König.

35) DSA port fast aging for mv88e6xxx driver, from Vivien Didelot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1715 commits)
  mlxsw: switchx2: Fix misuse of hard_header_len
  mlxsw: spectrum: Fix misuse of hard_header_len
  net/faraday: Stop NCSI device on shutdown
  net/ncsi: Introduce ncsi_stop_dev()
  net/ncsi: Rework the channel monitoring
  net/ncsi: Allow to extend NCSI request properties
  net/ncsi: Rework request index allocation
  net/ncsi: Don't probe on the reserved channel ID (0x1f)
  net/ncsi: Introduce NCSI_RESERVED_CHANNEL
  net/ncsi: Avoid unused-value build warning from ia64-linux-gcc
  net: Add netdev all_adj_list refcnt propagation to fix panic
  net: phy: Add Edge-rate driver for Microsemi PHYs.
  vmxnet3: Wake queue from reset work
  i40e: avoid NULL pointer dereference and recursive errors on early PCI error
  qed: Add RoCE ll2 & GSI support
  qed: Add support for memory registeration verbs
  qed: Add support for QP verbs
  qed: PD,PKEY and CQ verb support
  qed: Add support for RoCE hw init
  qede: Add qedr framework
  ...
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--   include/linux/rhashtable.h | 543
1 file changed, 429 insertions(+), 114 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
@@ -53,6 +53,11 @@ struct rhash_head {
         struct rhash_head __rcu *next;
 };
 
+struct rhlist_head {
+        struct rhash_head rhead;
+        struct rhlist_head __rcu *next;
+};
+
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
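For orientation, a minimal sketch of how an object would embed the new struct rhlist_head so that several instances sharing one key can live in the same table. The struct and field names here are hypothetical (not part of the patch) and are reused in the later sketches:

struct binding {                        /* hypothetical object */
        u32                     ifindex;        /* the hash key */
        struct rhlist_head      node;           /* node.rhead is the classic rhash_head */
        /* ... private payload ... */
};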
@@ -137,6 +142,7 @@ struct rhashtable_params {
  * @key_len: Key length for hashfn
  * @elasticity: Maximum chain length before rehash
  * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
  * @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
         unsigned int key_len;
         unsigned int elasticity;
         struct rhashtable_params p;
+        bool rhlist;
         struct work_struct run_work;
         struct mutex mutex;
         spinlock_t lock;
 };
 
 /**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+        struct rhashtable ht;
+};
+
+/**
  * struct rhashtable_walker - Hash table walker
  * @list: List entry on list of walkers
  * @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
 };
 
 /**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
  * @ht: Table to iterate through
  * @p: Current pointer
+ * @list: Current hash list pointer
  * @walker: Associated rhashtable walker
  * @slot: Current slot
  * @skip: Number of entries to skip in slot
@@ -173,7 +189,8 @@ struct rhashtable_walker {
 struct rhashtable_iter {
         struct rhashtable *ht;
         struct rhash_head *p;
-        struct rhashtable_walker *walker;
+        struct rhlist_head *list;
+        struct rhashtable_walker walker;
         unsigned int slot;
         unsigned int skip;
 };
@@ -339,15 +356,14 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
 
 int rhashtable_init(struct rhashtable *ht,
                     const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+                  const struct rhashtable_params *params);
 
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
-                                             const void *key,
-                                             struct rhash_head *obj,
-                                             struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+                             struct rhash_head *obj);
 
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
-                         gfp_t gfp);
+void rhashtable_walk_enter(struct rhashtable *ht,
+                           struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
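rhashtable_walk_enter() replaces rhashtable_walk_init() and can no longer fail, so it needs no gfp_t. A sketch of the walker lifecycle under the new API; rhashtable_walk_stop() is part of the existing interface and just not visible in this hunk, and dump_table() is a hypothetical caller:

static void dump_table(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        rhashtable_walk_enter(ht, &iter);       /* cannot fail */
        rhashtable_walk_start(&iter);           /* takes the RCU read lock */

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj))                /* ERR_PTR(-EAGAIN) after a resize */
                        continue;
                /* ... use obj ... */
        }

        rhashtable_walk_stop(&iter);            /* drops the RCU read lock */
        rhashtable_walk_exit(&iter);
}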
@@ -506,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
         rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
                                             tbl, hash, member)
 
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos: the &struct rlist_head to use as a loop cursor.
+ * @list: the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list) \
+        for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rlist_head to use as a loop cursor.
+ * @list: the head of the list
+ * @member: name of the &struct rlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+        for (pos = list; pos && rht_entry(tpos, pos, member); \
+             pos = rcu_dereference_raw(pos->next))
+
 static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
                                      const void *obj)
 {
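These macros consume the list returned by rhltable_lookup(), which is added further down in this diff. A sketch of walking every duplicate for one key, under rcu_read_lock(); binding_table, binding_params and struct binding are the hypothetical names from the earlier sketch:

        struct rhlist_head *list, *pos;
        struct binding *b;

        rcu_read_lock();
        list = rhltable_lookup(&binding_table, &ifindex, binding_params);
        rhl_for_each_entry_rcu(b, pos, list, node)
                pr_info("ifindex %u is bound\n", b->ifindex);
        rcu_read_unlock();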
@@ -515,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
         return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
 }
 
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
         struct rhashtable *ht, const void *key,
         const struct rhashtable_params params)
 {
@@ -538,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
         struct rhash_head *he;
         unsigned int hash;
 
-        rcu_read_lock();
-
         tbl = rht_dereference_rcu(ht->tbl, ht);
 restart:
         hash = rht_key_hashfn(ht, tbl, key, params);
@@ -548,8 +577,7 @@ restart:
                     params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                     rhashtable_compare(&arg, rht_obj(ht, he)))
                         continue;
-                rcu_read_unlock();
-                return rht_obj(ht, he);
+                return he;
         }
 
         /* Ensure we see any new tables. */
@@ -558,89 +586,165 @@ restart:
         tbl = rht_dereference_rcu(tbl->future_tbl, ht);
         if (unlikely(tbl))
                 goto restart;
-        rcu_read_unlock();
 
         return NULL;
 }
 
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
-        struct rhashtable *ht, const void *key, struct rhash_head *obj,
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+        struct rhashtable *ht, const void *key,
         const struct rhashtable_params params)
 {
+        struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+        return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+        struct rhashtable *ht, const void *key,
+        const struct rhashtable_params params)
+{
+        void *obj;
+
+        rcu_read_lock();
+        obj = rhashtable_lookup(ht, key, params);
+        rcu_read_unlock();
+
+        return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for a entry with an identical key. All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+        struct rhltable *hlt, const void *key,
+        const struct rhashtable_params params)
+{
+        struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+        return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in hashes in there is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
+        struct rhashtable *ht, const void *key, struct rhash_head *obj,
+        const struct rhashtable_params params, bool rhlist)
+{
         struct rhashtable_compare_arg arg = {
                 .ht = ht,
                 .key = key,
         };
-        struct bucket_table *tbl, *new_tbl;
+        struct rhash_head __rcu **pprev;
+        struct bucket_table *tbl;
         struct rhash_head *head;
         spinlock_t *lock;
-        unsigned int elasticity;
         unsigned int hash;
-        int err;
+        int elasticity;
+        void *data;
 
-restart:
         rcu_read_lock();
 
         tbl = rht_dereference_rcu(ht->tbl, ht);
+        hash = rht_head_hashfn(ht, tbl, obj, params);
+        lock = rht_bucket_lock(tbl, hash);
+        spin_lock_bh(lock);
 
-        /* All insertions must grab the oldest table containing
-         * the hashed bucket that is yet to be rehashed.
-         */
-        for (;;) {
-                hash = rht_head_hashfn(ht, tbl, obj, params);
-                lock = rht_bucket_lock(tbl, hash);
-                spin_lock_bh(lock);
-
-                if (tbl->rehash <= hash)
-                        break;
-
-                spin_unlock_bh(lock);
-                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-        }
-
-        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-        if (unlikely(new_tbl)) {
-                tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
-                if (!IS_ERR_OR_NULL(tbl))
-                        goto slow_path;
-
-                err = PTR_ERR(tbl);
-                goto out;
-        }
-
-        err = -E2BIG;
-        if (unlikely(rht_grow_above_max(ht, tbl)))
-                goto out;
-
-        if (unlikely(rht_grow_above_100(ht, tbl))) {
+        if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
 slow_path:
                 spin_unlock_bh(lock);
-                err = rhashtable_insert_rehash(ht, tbl);
                 rcu_read_unlock();
-                if (err)
-                        return err;
-
-                goto restart;
+                return rhashtable_insert_slow(ht, key, obj);
         }
 
-        err = -EEXIST;
         elasticity = ht->elasticity;
+        pprev = &tbl->buckets[hash];
         rht_for_each(head, tbl, hash) {
-                if (key &&
-                    unlikely(!(params.obj_cmpfn ?
-                               params.obj_cmpfn(&arg, rht_obj(ht, head)) :
-                               rhashtable_compare(&arg, rht_obj(ht, head)))))
+                struct rhlist_head *plist;
+                struct rhlist_head *list;
+
+                elasticity--;
+                if (!key ||
+                    (params.obj_cmpfn ?
+                     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+                     rhashtable_compare(&arg, rht_obj(ht, head))))
+                        continue;
+
+                data = rht_obj(ht, head);
+
+                if (!rhlist)
                         goto out;
-                if (!--elasticity)
-                        goto slow_path;
+
+
+                list = container_of(obj, struct rhlist_head, rhead);
+                plist = container_of(head, struct rhlist_head, rhead);
+
+                RCU_INIT_POINTER(list->next, plist);
+                head = rht_dereference_bucket(head->next, tbl, hash);
+                RCU_INIT_POINTER(list->rhead.next, head);
+                rcu_assign_pointer(*pprev, obj);
+
+                goto good;
         }
 
-        err = 0;
+        if (elasticity <= 0)
+                goto slow_path;
+
+        data = ERR_PTR(-E2BIG);
+        if (unlikely(rht_grow_above_max(ht, tbl)))
+                goto out;
+
+        if (unlikely(rht_grow_above_100(ht, tbl)))
+                goto slow_path;
 
         head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
         RCU_INIT_POINTER(obj->next, head);
+        if (rhlist) {
+                struct rhlist_head *list;
+
+                list = container_of(obj, struct rhlist_head, rhead);
+                RCU_INIT_POINTER(list->next, NULL);
+        }
 
         rcu_assign_pointer(tbl->buckets[hash], obj);
 
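The lookup path is split here: rhashtable_lookup() assumes the caller already holds rcu_read_lock(), while rhashtable_lookup_fast() keeps the old behaviour and takes the lock itself. A sketch with hypothetical names, assuming the table was initialised with key_len/key_offset so the default compare applies:

static bool entry_exists(struct rhashtable *ht, u32 key,
                         const struct rhashtable_params params)
{
        bool found;

        rcu_read_lock();
        found = rhashtable_lookup(ht, &key, params) != NULL;
        rcu_read_unlock();

        /* rhashtable_lookup_fast(ht, &key, params) would be equivalent,
         * with the RCU read lock taken and released internally.
         */
        return found;
}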
@@ -648,11 +752,14 @@ slow_path:
         if (rht_grow_above_75(ht, tbl))
                 schedule_work(&ht->run_work);
 
+good:
+        data = NULL;
+
 out:
         spin_unlock_bh(lock);
         rcu_read_unlock();
 
-        return err;
+        return data;
 }
 
 /**
@@ -675,7 +782,65 @@ static inline int rhashtable_insert_fast(
         struct rhashtable *ht, struct rhash_head *obj,
         const struct rhashtable_params params)
 {
-        return __rhashtable_insert_fast(ht, NULL, obj, params);
+        void *ret;
+
+        ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
+        if (IS_ERR(ret))
+                return PTR_ERR(ret);
+
+        return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt: hash list table
+ * @key: the pointer to the key
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert_key(
+        struct rhltable *hlt, const void *key, struct rhlist_head *list,
+        const struct rhashtable_params params)
+{
+        return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+                                                params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert(
+        struct rhltable *hlt, struct rhlist_head *list,
+        const struct rhashtable_params params)
+{
+        const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+        key += params.key_offset;
+
+        return rhltable_insert_key(hlt, key, list, params);
 }
 
 /**
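A sketch of setting up and filling an rhltable, continuing the hypothetical struct binding from the earlier sketch (all names are illustrative); rhltable_init() is assumed to have been called once before any insertion:

static struct rhltable binding_table;           /* hypothetical */

static const struct rhashtable_params binding_params = {
        .key_len             = sizeof(u32),
        .key_offset          = offsetof(struct binding, ifindex),
        .head_offset         = offsetof(struct binding, node),
        .automatic_shrinking = true,
};

static int add_binding(struct binding *b)
{
        /* A second object with the same ifindex does not get -EEXIST;
         * it is chained behind the first one via its rhlist_head.
         */
        return rhltable_insert(&binding_table, &b->node, binding_params);
}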
@@ -704,11 +869,16 @@ static inline int rhashtable_lookup_insert_fast(
         const struct rhashtable_params params)
 {
         const char *key = rht_obj(ht, obj);
+        void *ret;
 
         BUG_ON(ht->p.obj_hashfn);
 
-        return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
-                                        params);
+        ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+                                       false);
+        if (IS_ERR(ret))
+                return PTR_ERR(ret);
+
+        return ret == NULL ? 0 : -EEXIST;
 }
 
 /**
@@ -737,15 +907,42 @@ static inline int rhashtable_lookup_insert_key(
         struct rhashtable *ht, const void *key, struct rhash_head *obj,
         const struct rhashtable_params params)
 {
+        void *ret;
+
+        BUG_ON(!ht->p.obj_hashfn || !key);
+
+        ret = __rhashtable_insert_fast(ht, key, obj, params, false);
+        if (IS_ERR(ret))
+                return PTR_ERR(ret);
+
+        return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ * @data: pointer to element data already in hashes
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+        struct rhashtable *ht, const void *key, struct rhash_head *obj,
+        const struct rhashtable_params params)
+{
         BUG_ON(!ht->p.obj_hashfn || !key);
 
-        return __rhashtable_insert_fast(ht, key, obj, params);
+        return __rhashtable_insert_fast(ht, key, obj, params, false);
 }
 
 /* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
         struct rhashtable *ht, struct bucket_table *tbl,
-        struct rhash_head *obj, const struct rhashtable_params params)
+        struct rhash_head *obj, const struct rhashtable_params params,
+        bool rhlist)
 {
         struct rhash_head __rcu **pprev;
         struct rhash_head *he;
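The new rhashtable_lookup_get_insert_key() has a three-way return: NULL means the object was inserted, a valid pointer is the entry that was already hashed, and an ERR_PTR() carries the failure. A sketch of a caller, assuming a hypothetical struct entry with key/node members and an entry_params that defines an obj_hashfn (required by the BUG_ON above):

static int add_unique(struct rhashtable *ht, struct entry *new)
{
        struct entry *old;

        old = rhashtable_lookup_get_insert_key(ht, &new->key, &new->node,
                                               entry_params);
        if (IS_ERR(old))
                return PTR_ERR(old);    /* e.g. -E2BIG */
        if (old) {
                kfree(new);             /* lost the race: reuse "old" */
                return -EEXIST;
        }
        return 0;                       /* "new" is now in the table */
}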
@@ -760,39 +957,66 @@ static inline int __rhashtable_remove_fast(
 
         pprev = &tbl->buckets[hash];
         rht_for_each(he, tbl, hash) {
+                struct rhlist_head *list;
+
+                list = container_of(he, struct rhlist_head, rhead);
+
                 if (he != obj) {
+                        struct rhlist_head __rcu **lpprev;
+
                         pprev = &he->next;
-                        continue;
+
+                        if (!rhlist)
+                                continue;
+
+                        do {
+                                lpprev = &list->next;
+                                list = rht_dereference_bucket(list->next,
+                                                              tbl, hash);
+                        } while (list && obj != &list->rhead);
+
+                        if (!list)
+                                continue;
+
+                        list = rht_dereference_bucket(list->next, tbl, hash);
+                        RCU_INIT_POINTER(*lpprev, list);
+                        err = 0;
+                        break;
                 }
 
-                rcu_assign_pointer(*pprev, obj->next);
-                err = 0;
+                obj = rht_dereference_bucket(obj->next, tbl, hash);
+                err = 1;
+
+                if (rhlist) {
+                        list = rht_dereference_bucket(list->next, tbl, hash);
+                        if (list) {
+                                RCU_INIT_POINTER(list->rhead.next, obj);
+                                obj = &list->rhead;
+                                err = 0;
+                        }
+                }
+
+                rcu_assign_pointer(*pprev, obj);
                 break;
         }
 
         spin_unlock_bh(lock);
 
+        if (err > 0) {
+                atomic_dec(&ht->nelems);
+                if (unlikely(ht->p.automatic_shrinking &&
+                             rht_shrink_below_30(ht, tbl)))
+                        schedule_work(&ht->run_work);
+                err = 0;
+        }
+
         return err;
 }
 
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
         struct rhashtable *ht, struct rhash_head *obj,
-        const struct rhashtable_params params)
+        const struct rhashtable_params params, bool rhlist)
 {
         struct bucket_table *tbl;
         int err;
@@ -806,24 +1030,60 @@ static inline int rhashtable_remove_fast(
          * visible then that guarantees the entry to still be in
          * the old tbl if it exists.
          */
-        while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+        while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+                                                   rhlist)) &&
                (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
                 ;
 
-        if (err)
-                goto out;
-
-        atomic_dec(&ht->nelems);
-        if (unlikely(ht->p.automatic_shrinking &&
-                     rht_shrink_below_30(ht, tbl)))
-                schedule_work(&ht->run_work);
-
-out:
         rcu_read_unlock();
 
         return err;
 }
 
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerable slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+        struct rhashtable *ht, struct rhash_head *obj,
+        const struct rhashtable_params params)
+{
+        return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is single linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerable slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+        struct rhltable *hlt, struct rhlist_head *list,
+        const struct rhashtable_params params)
+{
+        return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
 /* Internal function, please use rhashtable_replace_fast() instead */
 static inline int __rhashtable_replace_fast(
         struct rhashtable *ht, struct bucket_table *tbl,
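A sketch of removal, again with the hypothetical binding_table/binding_params names; rhltable_remove() unlinks only the given rhlist_head, while other entries sharing the same key stay hashed:

static void del_binding(struct binding *b)
{
        /* -ENOENT means this particular object was not in the table. */
        if (rhltable_remove(&binding_table, &b->node, binding_params))
                pr_warn("binding was not hashed\n");
}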
@@ -906,4 +1166,59 @@ static inline int rhashtable_replace_fast(
         return err;
 }
 
+/* Obsolete function, do not use in new code. */
+static inline int rhashtable_walk_init(struct rhashtable *ht,
+                                       struct rhashtable_iter *iter, gfp_t gfp)
+{
+        rhashtable_walk_enter(ht, iter);
+        return 0;
+}
+
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+                                       struct rhashtable_iter *iter)
+{
+        return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt: the hash list table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+                                             void (*free_fn)(void *ptr,
+                                                             void *arg),
+                                             void *arg)
+{
+        return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+        return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
 #endif /* _LINUX_RHASHTABLE_H */
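Finally, a teardown sketch mirroring rhashtable_free_and_destroy(); the free_fn callback is invoked once per object still in the table, and the binding names remain hypothetical:

static void free_binding(void *ptr, void *arg)
{
        kfree(ptr);             /* ptr is the object, not the rhlist_head */
}

static void bindings_exit(void)
{
        rhltable_free_and_destroy(&binding_table, free_binding, NULL);
        /* or, if the table is known to be empty:
         * rhltable_destroy(&binding_table);
         */
}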