Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--	fs/btrfs/async-thread.c	| 137
1 file changed, 137 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0b78bf28ff5d..905de02e4386 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2014 Fujitsu. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -21,6 +22,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/freezer.h>
+#include <linux/workqueue.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
@@ -727,3 +729,138 @@ void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
 }
+
+struct btrfs_workqueue_struct {
+	struct workqueue_struct *normal_wq;
+	/* List head pointing to ordered work list */
+	struct list_head ordered_list;
+
+	/* Spinlock for ordered_list */
+	spinlock_t list_lock;
+};
+
+struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
+						     int flags,
+						     int max_active)
+{
+	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+
+	if (unlikely(!ret))
+		return NULL;
+
+	ret->normal_wq = alloc_workqueue("%s-%s", flags, max_active,
+					 "btrfs", name);
+	if (unlikely(!ret->normal_wq)) {
+		kfree(ret);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&ret->ordered_list);
+	spin_lock_init(&ret->list_lock);
+	return ret;
+}
+
+static void run_ordered_work(struct btrfs_workqueue_struct *wq)
+{
+	struct list_head *list = &wq->ordered_list;
+	struct btrfs_work_struct *work;
+	spinlock_t *lock = &wq->list_lock;
+	unsigned long flags;
+
+	while (1) {
+		spin_lock_irqsave(lock, flags);
+		if (list_empty(list))
+			break;
+		work = list_entry(list->next, struct btrfs_work_struct,
+				  ordered_list);
+		if (!test_bit(WORK_DONE_BIT, &work->flags))
+			break;
+
+		/*
+		 * we are going to call the ordered done function, but
+		 * we leave the work item on the list as a barrier so
+		 * that later work items that are done don't have their
+		 * functions called before this one returns
+		 */
+		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
+			break;
+		spin_unlock_irqrestore(lock, flags);
+		work->ordered_func(work);
+
+		/* now take the lock again and drop our item from the list */
+		spin_lock_irqsave(lock, flags);
+		list_del(&work->ordered_list);
+		spin_unlock_irqrestore(lock, flags);
+
+		/*
+		 * we don't want to call the ordered free functions
+		 * with the lock held though
+		 */
+		work->ordered_free(work);
+	}
+	spin_unlock_irqrestore(lock, flags);
+}
+
+static void normal_work_helper(struct work_struct *arg)
+{
+	struct btrfs_work_struct *work;
+	struct btrfs_workqueue_struct *wq;
+	int need_order = 0;
+
+	work = container_of(arg, struct btrfs_work_struct, normal_work);
+	/*
+	 * We should not touch things inside work in the following cases:
+	 * 1) after work->func() if it has no ordered_free
+	 *    Since the struct is freed in work->func().
+	 * 2) after setting WORK_DONE_BIT
+	 *    The work may be freed in other threads almost instantly.
+	 * So we save the needed things here.
+	 */
+	if (work->ordered_func)
+		need_order = 1;
+	wq = work->wq;
+
+	work->func(work);
+	if (need_order) {
+		set_bit(WORK_DONE_BIT, &work->flags);
+		run_ordered_work(wq);
+	}
+}
+
+void btrfs_init_work(struct btrfs_work_struct *work,
+		     void (*func)(struct btrfs_work_struct *),
+		     void (*ordered_func)(struct btrfs_work_struct *),
+		     void (*ordered_free)(struct btrfs_work_struct *))
+{
+	work->func = func;
+	work->ordered_func = ordered_func;
+	work->ordered_free = ordered_free;
+	INIT_WORK(&work->normal_work, normal_work_helper);
+	INIT_LIST_HEAD(&work->ordered_list);
+	work->flags = 0;
+}
+
+void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
+		      struct btrfs_work_struct *work)
+{
+	unsigned long flags;
+
+	work->wq = wq;
+	if (work->ordered_func) {
+		spin_lock_irqsave(&wq->list_lock, flags);
+		list_add_tail(&work->ordered_list, &wq->ordered_list);
+		spin_unlock_irqrestore(&wq->list_lock, flags);
+	}
+	queue_work(wq->normal_wq, &work->normal_work);
+}
+
+void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+{
+	destroy_workqueue(wq->normal_wq);
+	kfree(wq);
+}
+
+void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
+{
+	workqueue_set_max_active(wq->normal_wq, max);
+}