author     Pablo Neira Ayuso <pablo@netfilter.org>  2016-08-24 06:31:31 -0400
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2016-08-26 11:29:41 -0400
commit     5ca8cc5bf11faed257c762018aea9106d529232f (patch)
tree       fbde5c6f8c70195e649dbb0b0f69e6a808eb37e9
parent     6133740d6e80d969ff7d41098a9db1091d0f9c94 (diff)
rhashtable: add rhashtable_lookup_get_insert_key()
This patch modifies __rhashtable_insert_fast() so it returns the existing
object that clashes with the one that you want to insert. In case the object
is successfully inserted, NULL is returned. Otherwise, you get an error via
ERR_PTR().

This patch adapts the existing callers of __rhashtable_insert_fast() so they
handle this new logic, and it adds a new rhashtable_lookup_get_insert_key()
interface to fetch this existing object.

nf_tables needs this change to improve handling of EEXIST cases via honoring
the NLM_F_EXCL flag and by checking if the data part of the mapping matches
what we have.

Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--  include/linux/rhashtable.h  70
-rw-r--r--  lib/rhashtable.c            10
2 files changed, 64 insertions(+), 16 deletions(-)
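
For orientation, here is a minimal sketch of how a caller might consume the new
return convention of rhashtable_lookup_get_insert_key(). This is illustrative
only, not part of the patch: struct example_obj and example_insert() are
hypothetical names, and the hash table is assumed to have been initialized with
an obj_hashfn, which the new helper requires.

/* Illustrative sketch, not part of this patch. Assumes <linux/rhashtable.h>
 * and <linux/err.h>, plus a table initialized with an obj_hashfn.
 */
struct example_obj {
	u32			key;
	u32			value;
	struct rhash_head	node;	/* hash head embedded in the object */
};

static int example_insert(struct rhashtable *ht, struct example_obj *elem,
			  const struct rhashtable_params params)
{
	struct example_obj *old;
	void *ret;

	ret = rhashtable_lookup_get_insert_key(ht, &elem->key, &elem->node,
					       params);
	if (IS_ERR(ret))
		return PTR_ERR(ret);	/* e.g. -ENOMEM, -E2BIG, -EBUSY */
	if (!ret)
		return 0;		/* NULL: elem was inserted */

	/* Non-NULL: a clashing element is already in the table, and the
	 * caller can inspect it directly instead of only seeing -EEXIST.
	 */
	old = ret;
	pr_debug("clash with existing element, value=%u\n", old->value);
	return -EEXIST;
}
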
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3eef0802a0cd..26b7a059c65e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -343,7 +343,8 @@ int rhashtable_init(struct rhashtable *ht,
 struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
                                             const void *key,
                                             struct rhash_head *obj,
-                                            struct bucket_table *old_tbl);
+                                            struct bucket_table *old_tbl,
+                                            void **data);
 int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
 
 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
@@ -563,8 +564,11 @@ restart:
 	return NULL;
 }
 
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in hashes in there is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
 	const struct rhashtable_params params)
 {
@@ -577,6 +581,7 @@ static inline int __rhashtable_insert_fast(
 	spinlock_t *lock;
 	unsigned int elasticity;
 	unsigned int hash;
+	void *data = NULL;
 	int err;
 
 restart:
@@ -601,11 +606,14 @@ restart:
 
 	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 	if (unlikely(new_tbl)) {
-		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+		tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
 		if (!IS_ERR_OR_NULL(tbl))
 			goto slow_path;
 
 		err = PTR_ERR(tbl);
+		if (err == -EEXIST)
+			err = 0;
+
 		goto out;
 	}
 
@@ -619,25 +627,25 @@ slow_path:
 		err = rhashtable_insert_rehash(ht, tbl);
 		rcu_read_unlock();
 		if (err)
-			return err;
+			return ERR_PTR(err);
 
 		goto restart;
 	}
 
-	err = -EEXIST;
+	err = 0;
 	elasticity = ht->elasticity;
 	rht_for_each(head, tbl, hash) {
 		if (key &&
 		    unlikely(!(params.obj_cmpfn ?
 			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
-			       rhashtable_compare(&arg, rht_obj(ht, head)))))
+			       rhashtable_compare(&arg, rht_obj(ht, head))))) {
+			data = rht_obj(ht, head);
 			goto out;
+		}
 		if (!--elasticity)
 			goto slow_path;
 	}
 
-	err = 0;
-
 	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
@@ -652,7 +660,7 @@ out:
 	spin_unlock_bh(lock);
 	rcu_read_unlock();
 
-	return err;
+	return err ? ERR_PTR(err) : data;
 }
 
 /**
@@ -675,7 +683,13 @@ static inline int rhashtable_insert_fast(
 	struct rhashtable *ht, struct rhash_head *obj,
 	const struct rhashtable_params params)
 {
-	return __rhashtable_insert_fast(ht, NULL, obj, params);
+	void *ret;
+
+	ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	return ret == NULL ? 0 : -EEXIST;
 }
 
 /**
@@ -704,11 +718,15 @@ static inline int rhashtable_lookup_insert_fast(
 	const struct rhashtable_params params)
 {
 	const char *key = rht_obj(ht, obj);
+	void *ret;
 
 	BUG_ON(ht->p.obj_hashfn);
 
-	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
-					params);
+	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	return ret == NULL ? 0 : -EEXIST;
 }
 
 /**
@@ -737,6 +755,32 @@ static inline int rhashtable_lookup_insert_key(
 	struct rhashtable *ht, const void *key, struct rhash_head *obj,
 	const struct rhashtable_params params)
 {
+	void *ret;
+
+	BUG_ON(!ht->p.obj_hashfn || !key);
+
+	ret = __rhashtable_insert_fast(ht, key, obj, params);
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
+
+	return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht:		hash table
+ * @obj:	pointer to hash head inside object
+ * @params:	hash table parameters
+ * @data:	pointer to element data already in hashes
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+	struct rhashtable *ht, const void *key, struct rhash_head *obj,
+	const struct rhashtable_params params)
+{
 	BUG_ON(!ht->p.obj_hashfn || !key);
 
 	return __rhashtable_insert_fast(ht, key, obj, params);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 5d845ffd7982..7a940d92f17e 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -438,7 +438,8 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
 struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
                                             const void *key,
                                             struct rhash_head *obj,
-                                            struct bucket_table *tbl)
+                                            struct bucket_table *tbl,
+                                            void **data)
 {
 	struct rhash_head *head;
 	unsigned int hash;
@@ -449,8 +450,11 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
 	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
 
 	err = -EEXIST;
-	if (key && rhashtable_lookup_fast(ht, key, ht->p))
-		goto exit;
+	if (key) {
+		*data = rhashtable_lookup_fast(ht, key, ht->p);
+		if (*data)
+			goto exit;
+	}
 
 	err = -E2BIG;
 	if (unlikely(rht_grow_above_max(ht, tbl)))
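
The nf_tables scenario referenced in the commit message, honoring NLM_F_EXCL
and comparing the data part of an existing mapping, would then look roughly
like the sketch below. Again a hedged illustration, not the actual nf_tables
code: example_map_update() is a hypothetical name, struct example_obj is reused
from the earlier sketch, and NLM_F_EXCL comes from <linux/netlink.h>.

/* Hedged sketch of the EEXIST handling described in the commit message;
 * hypothetical names, not the actual nf_tables code.
 */
static int example_map_update(struct rhashtable *ht, struct example_obj *elem,
			      const struct rhashtable_params params,
			      u16 nlmsg_flags)
{
	struct example_obj *old;
	void *ret;

	ret = rhashtable_lookup_get_insert_key(ht, &elem->key, &elem->node,
					       params);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	if (!ret)
		return 0;			/* inserted, no clash */

	old = ret;
	if (nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;			/* exclusive create requested */

	/* Element exists: only report EEXIST when the data part differs. */
	return old->value == elem->value ? 0 : -EEXIST;
}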