path: root/lib
author      Herbert Xu <herbert@gondor.apana.org.au>  2015-03-20 06:57:06 -0400
committer   David S. Miller <davem@davemloft.net>     2015-03-20 16:16:24 -0400
commit      dc0ee268d85026530720d8c874716287b7ede25b (patch)
tree        a06b2334d07b4a9177963cb34ff2455cf224c83b /lib
parent      6cca7289d5cba80d61da711205cd230fc637e2e3 (diff)
rhashtable: Rip out obsolete out-of-line interface
Now that all rhashtable users have been converted over to the inline
interface, this patch removes the unused out-of-line interface.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
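For reference, the inline interface that replaces the removed symbols is used roughly as follows. This is a minimal sketch only; the struct, field, and variable names (test_obj, test_params, ht, obj, key) are illustrative and not part of this patch, and exact signatures should be checked against include/linux/rhashtable.h of this kernel version.

	/* Hypothetical caller-side object and table parameters. */
	struct test_obj {
		u32			key;
		struct rhash_head	node;
	};

	static const struct rhashtable_params test_params = {
		.key_len     = sizeof(u32),
		.key_offset  = offsetof(struct test_obj, key),
		.head_offset = offsetof(struct test_obj, node),
	};

	/* Old out-of-line calls such as rhashtable_insert()/rhashtable_lookup()
	 * become their inline _fast counterparts, which take the params struct
	 * directly instead of reading ht->p at run time:
	 */
	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
	obj = rhashtable_lookup_fast(&ht, &key, test_params);
	err = rhashtable_remove_fast(&ht, &obj->node, test_params);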
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c  284
1 file changed, 0 insertions(+), 284 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index d1d23fb58525..83cfedd6612a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -339,290 +339,6 @@ exit:
 }
 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
 
-static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				bool (*compare)(void *, void *), void *arg)
-{
-	struct bucket_table *tbl, *old_tbl;
-	struct rhash_head *head;
-	bool no_resize_running;
-	unsigned hash;
-	spinlock_t *old_lock;
-	bool success = true;
-
-	rcu_read_lock();
-
-	old_tbl = rht_dereference_rcu(ht->tbl, ht);
-	hash = head_hashfn(ht, old_tbl, obj);
-	old_lock = rht_bucket_lock(old_tbl, hash);
-
-	spin_lock_bh(old_lock);
-
-	/* Because we have already taken the bucket lock in old_tbl,
-	 * if we find that future_tbl is not yet visible then that
-	 * guarantees all other insertions of the same entry will
-	 * also grab the bucket lock in old_tbl because until the
-	 * rehash completes ht->tbl won't be changed.
-	 */
-	tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
-	if (tbl != old_tbl) {
-		hash = head_hashfn(ht, tbl, obj);
-		spin_lock_nested(rht_bucket_lock(tbl, hash),
-				 SINGLE_DEPTH_NESTING);
-	}
-
-	if (compare &&
-	    rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
-				      compare, arg)) {
-		success = false;
-		goto exit;
-	}
-
-	no_resize_running = tbl == old_tbl;
-
-	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
-
-	if (rht_is_a_nulls(head))
-		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
-	else
-		RCU_INIT_POINTER(obj->next, head);
-
-	rcu_assign_pointer(tbl->buckets[hash], obj);
-
-	atomic_inc(&ht->nelems);
-	if (no_resize_running && rht_grow_above_75(ht, tbl))
-		schedule_work(&ht->run_work);
-
-exit:
-	if (tbl != old_tbl)
-		spin_unlock(rht_bucket_lock(tbl, hash));
-
-	spin_unlock_bh(old_lock);
-
-	rcu_read_unlock();
-
-	return success;
-}
-
-/**
- * rhashtable_insert - insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
-	__rhashtable_insert(ht, obj, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(rhashtable_insert);
-
-static bool __rhashtable_remove(struct rhashtable *ht,
-				struct bucket_table *tbl,
-				struct rhash_head *obj)
-{
-	struct rhash_head __rcu **pprev;
-	struct rhash_head *he;
-	spinlock_t * lock;
-	unsigned hash;
-	bool ret = false;
-
-	hash = head_hashfn(ht, tbl, obj);
-	lock = rht_bucket_lock(tbl, hash);
-
-	spin_lock_bh(lock);
-
-	pprev = &tbl->buckets[hash];
-	rht_for_each(he, tbl, hash) {
-		if (he != obj) {
-			pprev = &he->next;
-			continue;
-		}
-
-		rcu_assign_pointer(*pprev, obj->next);
-		ret = true;
-		break;
-	}
-
-	spin_unlock_bh(lock);
-
-	return ret;
-}
-
-/**
- * rhashtable_remove - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * The caller must ensure that no concurrent table mutations occur. It is
- * however valid to have concurrent lookups if they are RCU protected.
- */
-bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
-{
-	struct bucket_table *tbl;
-	bool ret;
-
-	rcu_read_lock();
-
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-
-	/* Because we have already taken (and released) the bucket
-	 * lock in old_tbl, if we find that future_tbl is not yet
-	 * visible then that guarantees the entry to still be in
-	 * the old tbl if it exists.
-	 */
-	while (!(ret = __rhashtable_remove(ht, tbl, obj)) &&
-	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
-		;
-
-	if (ret) {
-		atomic_dec(&ht->nelems);
-		if (rht_shrink_below_30(ht, tbl))
-			schedule_work(&ht->run_work);
-	}
-
-	rcu_read_unlock();
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(rhashtable_remove);
-
-/**
- * rhashtable_lookup - lookup key in hash table
- * @ht: hash table
- * @key: pointer to key
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- */
-void *rhashtable_lookup(struct rhashtable *ht, const void *key)
-{
-	return rhashtable_lookup_fast(ht, key, ht->p);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup);
-
-/**
- * rhashtable_lookup_compare - search hash table with compare function
- * @ht: hash table
- * @key: the pointer to the key
- * @compare: compare function, must return true on match
- * @arg: argument passed on to compare function
- *
- * Traverses the bucket chain behind the provided hash value and calls the
- * specified compare function for each entry.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Returns the first entry on which the compare function returned true.
- */
-void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
-				bool (*compare)(void *, void *),
-				void *arg)
-{
-	const struct bucket_table *tbl;
-	struct rhash_head *he;
-	u32 hash;
-
-	rcu_read_lock();
-
-	tbl = rht_dereference_rcu(ht->tbl, ht);
-restart:
-	hash = rht_key_hashfn(ht, tbl, key, ht->p);
-	rht_for_each_rcu(he, tbl, hash) {
-		if (!compare(rht_obj(ht, he), arg))
-			continue;
-		rcu_read_unlock();
-		return rht_obj(ht, he);
-	}
-
-	/* Ensure we see any new tables. */
-	smp_rmb();
-
-	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
-	if (unlikely(tbl))
-		goto restart;
-	rcu_read_unlock();
-
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
-
-/**
- * rhashtable_lookup_insert - lookup and insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
-{
-	return rhashtable_lookup_insert_fast(ht, obj, ht->p);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
-
-/**
- * rhashtable_lookup_compare_insert - search and insert object to hash table
- *                                    with compare function
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @compare: compare function, must return true on match
- * @arg: argument passed on to compare function
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
-				      struct rhash_head *obj,
-				      bool (*compare)(void *, void *),
-				      void *arg)
-{
-	BUG_ON(!ht->p.key_len);
-
-	return __rhashtable_insert(ht, obj, compare, arg);
-}
-EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
-
 /**
  * rhashtable_walk_init - Initialise an iterator
  * @ht: Table to walk over