author	Tejun Heo <tj@kernel.org>	2014-01-10 08:57:26 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-01-10 16:51:21 -0500
commit	9f010c2ad5194a4b682e747984477850fabd03be (patch)
tree	e989b667775b3e1bf9b6da90bbaf2815eb103502 /fs/kernfs
parent	895a068a524e134900b9d98b519309b7aae7bbb1 (diff)
kernfs: implement kernfs_{de|re}activate[_self]()
This patch implements four functions to manipulate deactivation state: deactivate, reactivate, and the _self suffixed pair. A new field, kernfs_node->deact_depth, is added so that concurrent and nested deactivations are handled properly. kernfs_node->hash is moved so that it's paired with the new field without increasing the size of kernfs_node.

A kernfs user's lock would normally nest inside the active ref, but during removal the user may want to perform kernfs_remove() while holding said lock, which would introduce a reverse locking dependency. These functions can be used to break such a reverse dependency by allowing the deactivation step to be performed separately, outside the user's critical section. This will also be used to implement kernfs_remove_self().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
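As a minimal sketch (not part of this patch) of the removal pattern the message describes, a kernfs user whose subsystem lock normally nests inside active refs could deactivate first and only then take its lock; my_subsys_mutex and my_subsys_remove_node are hypothetical names:

	#include <linux/kernfs.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_subsys_mutex);	/* hypothetical subsystem lock */

	static void my_subsys_remove_node(struct kernfs_node *kn)
	{
		/*
		 * Deactivate outside my_subsys_mutex: waiting for in-flight
		 * active refs to drain happens here, so taking the lock
		 * below can't create a lock-to-active-ref reverse dependency.
		 */
		kernfs_deactivate(kn);

		mutex_lock(&my_subsys_mutex);
		/* ... tear down the subsystem state backing @kn ... */
		kernfs_remove(kn);	/* deactivation nests, so this is fine */
		mutex_unlock(&my_subsys_mutex);
	}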
Diffstat (limited to 'fs/kernfs')
-rw-r--r--	fs/kernfs/dir.c	118
1 file changed, 117 insertions(+), 1 deletion(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 37dd6408f5f6..1aeb57969bff 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -396,6 +396,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_root *root, const char *name,
 
 	atomic_set(&kn->count, 1);
 	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
+	kn->deact_depth = 1;
 	RB_CLEAR_NODE(&kn->rb);
 
 	kn->name = name;
@@ -461,6 +462,7 @@ int kernfs_add_one(struct kernfs_node *kn, struct kernfs_node *parent)
 
 	/* Mark the entry added into directory tree */
 	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+	kn->deact_depth--;
 	ret = 0;
 out_unlock:
 	mutex_unlock(&kernfs_mutex);
@@ -561,6 +563,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
 	}
 
 	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+	kn->deact_depth--;
 	kn->priv = priv;
 	kn->dir.root = root;
 
@@ -773,7 +776,8 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
 	/* prevent any new usage under @kn by deactivating all nodes */
 	pos = NULL;
 	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (atomic_read(&pos->active) >= 0) {
+		if (!pos->deact_depth++) {
+			WARN_ON_ONCE(atomic_read(&pos->active) < 0);
 			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
 			pos->flags |= KERNFS_JUST_DEACTIVATED;
 		}
@@ -797,6 +801,118 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
 	}
 }
 
+static void __kernfs_reactivate(struct kernfs_node *kn)
+{
+	struct kernfs_node *pos;
+
+	lockdep_assert_held(&kernfs_mutex);
+
+	pos = NULL;
+	while ((pos = kernfs_next_descendant_post(pos, kn))) {
+		if (!--pos->deact_depth) {
+			WARN_ON_ONCE(atomic_read(&pos->active) >= 0);
+			atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
+		}
+		WARN_ON_ONCE(pos->deact_depth < 0);
+	}
+
+	/* some nodes reactivated, kick get_active waiters */
+	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
+}
+
+static void __kernfs_deactivate_self(struct kernfs_node *kn)
+{
+	/*
+	 * Take ourself out of the active ref dependency chain and
+	 * deactivate.  If we're called without an active ref, lockdep
+	 * will complain.
+	 */
+	kernfs_put_active(kn);
+	__kernfs_deactivate(kn);
+}
+
+static void __kernfs_reactivate_self(struct kernfs_node *kn)
+{
+	__kernfs_reactivate(kn);
+	/*
+	 * Restore the active ref dropped by deactivate_self() so that it's
+	 * balanced on return.  put_active() will soon be called on @kn, so
+	 * this can't break anything regardless of @kn's state.
+	 */
+	atomic_inc(&kn->active);
+	if (kernfs_lockdep(kn))
+		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
+}
+
+/**
+ * kernfs_deactivate - deactivate subtree of a node
+ * @kn: kernfs_node to deactivate subtree of
+ *
+ * Deactivate the subtree of @kn.  On return, there's no active operation
+ * going on under @kn and creation or renaming of a node under @kn is
+ * blocked until @kn is reactivated or removed.  This function can be
+ * called multiple times and nests properly.  Each invocation should be
+ * paired with kernfs_reactivate().
+ *
+ * For a kernfs user which uses simple locking, the subsystem lock would
+ * nest inside the active reference.  This becomes problematic if the
+ * user tries to remove nodes while holding the subsystem lock, as it
+ * would create a reverse locking dependency from the subsystem lock to
+ * the active ref.  This function can be used to break such a reverse
+ * dependency.  The user can call this function outside the subsystem
+ * lock and then proceed to invoke kernfs_remove() while holding the
+ * subsystem lock without introducing such a reverse dependency.
+ */
+void kernfs_deactivate(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_deactivate(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_reactivate - reactivate subtree of a node
+ * @kn: kernfs_node to reactivate subtree of
+ *
+ * Undo kernfs_deactivate().
+ */
+void kernfs_reactivate(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_reactivate(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_deactivate_self - deactivate subtree of a node from its own method
+ * @kn: the self kernfs_node to deactivate subtree of
+ *
+ * The caller must be running off of a kernfs operation which is invoked
+ * with an active reference - e.g. one of kernfs_ops.  Once this function
+ * is called, @kn may be removed by someone else while the enclosing
+ * method is in progress.  Other than that, this function is equivalent
+ * to kernfs_deactivate() and should be paired with
+ * kernfs_reactivate_self().
+ */
+void kernfs_deactivate_self(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_deactivate_self(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_reactivate_self - reactivate subtree of a node from its own method
+ * @kn: the self kernfs_node to reactivate subtree of
+ *
+ * Undo kernfs_deactivate_self().
+ */
+void kernfs_reactivate_self(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_reactivate_self(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
 static void __kernfs_remove(struct kernfs_node *kn)
 {
 	struct kernfs_root *root = kernfs_root(kn);
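Beyond the diff itself, a hedged sketch of how the _self pair is intended to be used from inside a kernfs operation, which runs with an active ref on @kn as the doc comments above require; my_lock and my_attr_store are hypothetical names:

	static DEFINE_MUTEX(my_lock);	/* hypothetical lock that must not nest inside active refs */

	static ssize_t my_attr_store(struct kernfs_open_file *of, char *buf,
				     size_t bytes, loff_t off)
	{
		struct kernfs_node *kn = of->kn;

		kernfs_deactivate_self(kn);	/* drops our own active ref too */
		mutex_lock(&my_lock);
		/* @kn may be removed by someone else while we hold my_lock */
		mutex_unlock(&my_lock);
		kernfs_reactivate_self(kn);	/* restores the active ref so it balances on return */

		return bytes;
	}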