diff options
Diffstat (limited to 'fs/kernfs/dir.c')
-rw-r--r-- | fs/kernfs/dir.c | 138 |
1 file changed, 137 insertions, 1 deletion
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index d0fd739bf82d..8c63ae1bccb6 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c | |||
@@ -761,7 +761,12 @@ static void __kernfs_remove(struct kernfs_node *kn) | |||
761 | 761 | ||
762 | lockdep_assert_held(&kernfs_mutex); | 762 | lockdep_assert_held(&kernfs_mutex); |
763 | 763 | ||
764 | if (!kn) | 764 | /* |
765 | * Short-circuit if non-root @kn has already finished removal. | ||
766 | * This is for kernfs_remove_self() which plays with active ref | ||
767 | * after removal. | ||
768 | */ | ||
769 | if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb))) | ||
765 | return; | 770 | return; |
766 | 771 | ||
767 | pr_debug("kernfs %s: removing\n", kn->name); | 772 | pr_debug("kernfs %s: removing\n", kn->name); |
@@ -821,6 +826,137 @@ void kernfs_remove(struct kernfs_node *kn) | |||
821 | } | 826 | } |
822 | 827 | ||
823 | /** | 828 | /** |
/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}
850 | |||
/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed, or be in
 * the process of being removed.  Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	/* re-acquire the lockdep read annotation dropped by the matching
	 * kernfs_break_active_protection() so the op's release balances */
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
881 | |||
/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, all concurrent writes to a "delete" file to
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex.  The ones which lost arbitration
	 * wait for SUICIDED && drained which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			/* winner done and all active refs drained? */
			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			/* drop the mutex so the winner can make progress */
			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while holding kernfs_mutex; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}
958 | |||
959 | /** | ||
824 | * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it | 960 | * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it |
825 | * @parent: parent of the target | 961 | * @parent: parent of the target |
826 | * @name: name of the kernfs_node to remove | 962 | * @name: name of the kernfs_node to remove |