author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-01-13 17:09:38 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-01-13 17:09:38 -0500
commit		9b0925a6ff64a33be45497e3c798bfee8790b102 (patch)
tree		827aec07fe844addd8e1dd2d1f09b4bb855c9cd1 /fs/kernfs/dir.c
parent		a9f138b0e537de55933335d580ebd38c2bc53c47 (diff)
Revert "kernfs: implement kernfs_{de|re}activate[_self]()"
This reverts commit 9f010c2ad5194a4b682e747984477850fabd03be.

Tejun writes:

	I'm sorry but can you please revert the whole series?
	get_active() waiting while a node is deactivated has potential
	to lead to deadlock and that deactivate/reactivate interface is
	something fundamentally flawed and that cgroup will have to work
	with the remove_self() like everybody else.  IOW, I think the
	first posting was correct.

Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
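The deadlock Tejun describes is the classic ABBA pattern between a
subsystem lock and kernfs active references. The following is a minimal
hypothetical sketch of the problematic ordering; subsys_mutex,
demo_show() and demo_remove() are illustrative names, not part of kernfs
or of this patch:

#include <linux/kernfs.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(subsys_mutex);	/* hypothetical subsystem lock */

/*
 * Thread A runs a kernfs operation, so it already holds an active ref
 * on the node when it takes the subsystem lock:
 */
static int demo_show(struct seq_file *sf, void *v)
{
	mutex_lock(&subsys_mutex);	/* active ref -> subsys_mutex */
	seq_puts(sf, "demo\n");
	mutex_unlock(&subsys_mutex);
	return 0;
}

/*
 * Thread B removes the node while holding the subsystem lock;
 * kernfs_remove() waits for all active refs on the node to drain:
 */
static void demo_remove(struct kernfs_node *kn)
{
	mutex_lock(&subsys_mutex);	/* subsys_mutex -> active ref drain */
	kernfs_remove(kn);
	mutex_unlock(&subsys_mutex);
}

If B holds subsys_mutex while waiting for A's active ref to drop, and A
holds its active ref while waiting for subsys_mutex, neither makes
progress. The reverted series tried to break this ordering with
deactivate/reactivate, but having get_active() block while a node is
deactivated reintroduces the same wait in the opposite direction, which
is the flaw the quote above points out.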
Diffstat (limited to 'fs/kernfs/dir.c')
-rw-r--r--	fs/kernfs/dir.c	118
1 file changed, 1 insertion(+), 117 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 1aeb57969bff..37dd6408f5f6 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -396,7 +396,6 @@ struct kernfs_node *kernfs_new_node(struct kernfs_root *root, const char *name,
 
 	atomic_set(&kn->count, 1);
 	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
-	kn->deact_depth = 1;
 	RB_CLEAR_NODE(&kn->rb);
 
 	kn->name = name;
@@ -462,7 +461,6 @@ int kernfs_add_one(struct kernfs_node *kn, struct kernfs_node *parent)
 
 	/* Mark the entry added into directory tree */
 	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
-	kn->deact_depth--;
 	ret = 0;
 out_unlock:
 	mutex_unlock(&kernfs_mutex);
@@ -563,7 +561,6 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
 	}
 
 	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
-	kn->deact_depth--;
 	kn->priv = priv;
 	kn->dir.root = root;
 
@@ -776,8 +773,7 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
 	/* prevent any new usage under @kn by deactivating all nodes */
 	pos = NULL;
 	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (!pos->deact_depth++) {
-			WARN_ON_ONCE(atomic_read(&pos->active) < 0);
+		if (atomic_read(&pos->active) >= 0) {
 			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
 			pos->flags |= KERNFS_JUST_DEACTIVATED;
 		}
@@ -801,118 +797,6 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
 	}
 }
 
-static void __kernfs_reactivate(struct kernfs_node *kn)
-{
-	struct kernfs_node *pos;
-
-	lockdep_assert_held(&kernfs_mutex);
-
-	pos = NULL;
-	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (!--pos->deact_depth) {
-			WARN_ON_ONCE(atomic_read(&pos->active) >= 0);
-			atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
-		}
-		WARN_ON_ONCE(pos->deact_depth < 0);
-	}
-
-	/* some nodes reactivated, kick get_active waiters */
-	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
-}
-
-static void __kernfs_deactivate_self(struct kernfs_node *kn)
-{
-	/*
-	 * Take ourself out of the active ref dependency chain and
-	 * deactivate.  If we're called without an active ref, lockdep will
-	 * complain.
-	 */
-	kernfs_put_active(kn);
-	__kernfs_deactivate(kn);
-}
-
-static void __kernfs_reactivate_self(struct kernfs_node *kn)
-{
-	__kernfs_reactivate(kn);
-	/*
-	 * Restore active ref dropped by deactivate_self() so that it's
-	 * balanced on return.  put_active() will soon be called on @kn, so
-	 * this can't break anything regardless of @kn's state.
-	 */
-	atomic_inc(&kn->active);
-	if (kernfs_lockdep(kn))
-		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
-}
-
-/**
- * kernfs_deactivate - deactivate subtree of a node
- * @kn: kernfs_node to deactivate subtree of
- *
- * Deactivate the subtree of @kn.  On return, there's no active operation
- * going on under @kn and creation or renaming of a node under @kn is
- * blocked until @kn is reactivated or removed.  This function can be
- * called multiple times and nests properly.  Each invocation should be
- * paired with kernfs_reactivate().
- *
- * For a kernfs user which uses simple locking, the subsystem lock would
- * nest inside active reference.  This becomes problematic if the user
- * tries to remove nodes while holding the subsystem lock as it would create
- * a reverse locking dependency from the subsystem lock to active ref.
- * This function can be used to break such reverse dependency.  The user
- * can call this function outside the subsystem lock and then proceed to
- * invoke kernfs_remove() while holding the subsystem lock without
- * introducing such reverse dependency.
- */
-void kernfs_deactivate(struct kernfs_node *kn)
-{
-	mutex_lock(&kernfs_mutex);
-	__kernfs_deactivate(kn);
-	mutex_unlock(&kernfs_mutex);
-}
-
-/**
- * kernfs_reactivate - reactivate subtree of a node
- * @kn: kernfs_node to reactivate subtree of
- *
- * Undo kernfs_deactivate().
- */
-void kernfs_reactivate(struct kernfs_node *kn)
-{
-	mutex_lock(&kernfs_mutex);
-	__kernfs_reactivate(kn);
-	mutex_unlock(&kernfs_mutex);
-}
-
-/**
- * kernfs_deactivate_self - deactivate subtree of a node from its own method
- * @kn: the self kernfs_node to deactivate subtree of
- *
- * The caller must be running off of a kernfs operation which is invoked
- * with an active reference - e.g. one of kernfs_ops.  Once this function
- * is called, @kn may be removed by someone else while the enclosing method
- * is in progress.  Other than that, this function is equivalent to
- * kernfs_deactivate() and should be paired with kernfs_reactivate_self().
- */
-void kernfs_deactivate_self(struct kernfs_node *kn)
-{
-	mutex_lock(&kernfs_mutex);
-	__kernfs_deactivate_self(kn);
-	mutex_unlock(&kernfs_mutex);
-}
-
-/**
- * kernfs_reactivate_self - reactivate subtree of a node from its own method
- * @kn: the self kernfs_node to reactivate subtree of
- *
- * Undo kernfs_deactivate_self().
- */
-void kernfs_reactivate_self(struct kernfs_node *kn)
-{
-	mutex_lock(&kernfs_mutex);
-	__kernfs_reactivate_self(kn);
-	mutex_unlock(&kernfs_mutex);
-}
-
 static void __kernfs_remove(struct kernfs_node *kn)
 {
 	struct kernfs_root *root = kernfs_root(kn);
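For context, the kernel-doc removed above spells out the usage pattern
the interface was meant to enable: drain active references outside the
subsystem lock, then remove under it. Below is a short sketch of that
intended pattern, reconstructed from the removed comment; subsys_mutex
is again a hypothetical subsystem lock, and kernfs_deactivate() itself
no longer exists once this revert is applied:

#include <linux/kernfs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(subsys_mutex);	/* hypothetical subsystem lock */

static void demo_remove_without_reversal(struct kernfs_node *kn)
{
	/*
	 * Drain active refs outside the subsystem lock, so no kernfs op
	 * can later sit on an active ref while waiting for subsys_mutex.
	 */
	kernfs_deactivate(kn);		/* interface removed by this revert */

	mutex_lock(&subsys_mutex);
	kernfs_remove(kn);		/* nothing left to wait for */
	mutex_unlock(&subsys_mutex);
}

With the revert this pattern goes away; per the commit message, users
such as cgroup are expected to rely on the remove_self() approach
instead, where the node's own method drops its active ref and removes
itself (as __kernfs_deactivate_self() above did), avoiding the reverse
dependency without a separate deactivate/reactivate step.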