author		David Sterba <dsterba@suse.com>	2016-04-26 20:15:15 -0400
committer	David Sterba <dsterba@suse.com>	2016-05-10 03:46:08 -0400
commit		6ac10a6ac2b11ada24580cc76dcd0c182061c576
tree		2027483cee168cf52da50e5c729b0721f2d21016
parent		8eb0dfdbda3f56bf7d248ed87fcc383df114ecbb

btrfs: rename and document compression workspace members

The names are confusing, pick more fitting names and add comments.

Signed-off-by: David Sterba <dsterba@suse.com>

 fs/btrfs/compression.c | 35 +++++++++++++++++++----------------
 1 file changed, 19 insertions(+), 16 deletions(-)
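In short, this is a rename-and-document cleanup of the per-compression-type workspace pool: num_ws becomes free_ws (workspaces parked on the idle list) and alloc_ws becomes total_ws (all workspaces currently allocated), with a comment added for each member. A runnable userspace sketch of the pool pattern follows the last hunk.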
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ff61a41ac90b..4d5cd9624bb3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -743,8 +743,11 @@ out:
 static struct {
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
-	int num_ws;
-	atomic_t alloc_ws;
+	/* Number of free workspaces */
+	int free_ws;
+	/* Total number of allocated workspaces */
+	atomic_t total_ws;
+	/* Waiters for a free workspace */
 	wait_queue_head_t ws_wait;
 } btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
 
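With the new names, the two counters have clearly distinct roles: free_ws counts the workspaces sitting on idle_ws and is only modified under ws_lock, while total_ws counts every workspace allocated and not yet destroyed, and is an atomic_t so it can be read after the lock has been dropped (see the hunks below).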
@@ -760,7 +763,7 @@ void __init btrfs_init_compress(void)
 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
 		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
 		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
-		atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
+		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
 		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 	}
 }
@@ -777,35 +780,35 @@ static struct list_head *find_workspace(int type)
 
 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
-	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
+	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
 	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
-	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
+	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
 again:
 	spin_lock(ws_lock);
 	if (!list_empty(idle_ws)) {
 		workspace = idle_ws->next;
 		list_del(workspace);
-		(*num_ws)--;
+		(*free_ws)--;
 		spin_unlock(ws_lock);
 		return workspace;
 
 	}
-	if (atomic_read(alloc_ws) > cpus) {
+	if (atomic_read(total_ws) > cpus) {
 		DEFINE_WAIT(wait);
 
 		spin_unlock(ws_lock);
 		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(alloc_ws) > cpus && !*num_ws)
+		if (atomic_read(total_ws) > cpus && !*free_ws)
 			schedule();
 		finish_wait(ws_wait, &wait);
 		goto again;
 	}
-	atomic_inc(alloc_ws);
+	atomic_inc(total_ws);
 	spin_unlock(ws_lock);
 
 	workspace = btrfs_compress_op[idx]->alloc_workspace();
 	if (IS_ERR(workspace)) {
-		atomic_dec(alloc_ws);
+		atomic_dec(total_ws);
 		wake_up(ws_wait);
 	}
 	return workspace;
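The slow path above is the standard kernel sleep/wakeup idiom: once total_ws exceeds the CPU count, the task registers itself on ws_wait with prepare_to_wait(), re-checks the condition, and only then sleeps in schedule(). The re-check after prepare_to_wait() closes the race where another task frees a workspace between the first test and going to sleep; finish_wait() plus goto again then retries the fast path. The same shape appears in the userspace sketch after the last hunk.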
@@ -820,21 +823,21 @@ static void free_workspace(int type, struct list_head *workspace)
 	int idx = type - 1;
 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
-	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
+	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
 	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
-	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
+	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
 
 	spin_lock(ws_lock);
-	if (*num_ws < num_online_cpus()) {
+	if (*free_ws < num_online_cpus()) {
 		list_add(workspace, idle_ws);
-		(*num_ws)++;
+		(*free_ws)++;
 		spin_unlock(ws_lock);
 		goto wake;
 	}
 	spin_unlock(ws_lock);
 
 	btrfs_compress_op[idx]->free_workspace(workspace);
-	atomic_dec(alloc_ws);
+	atomic_dec(total_ws);
 wake:
 	/*
 	 * Make sure counter is updated before we wake up waiters.
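The hunk ends inside the comment at the wake: label. Per that comment, the counter updates above must be visible before waiters are woken; the code that follows (not shown in this hunk) presumably issues the wakeup on ws_wait, matching the wake_up() in the error path of find_workspace() above.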
@@ -857,7 +860,7 @@ static void free_workspaces(void)
 		workspace = btrfs_comp_ws[i].idle_ws.next;
 		list_del(workspace);
 		btrfs_compress_op[i]->free_workspace(workspace);
-		atomic_dec(&btrfs_comp_ws[i].alloc_ws);
+		atomic_dec(&btrfs_comp_ws[i].total_ws);
 		}
 	}
 }
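To make the pattern concrete, here is a minimal userspace analogy of the pool, assuming POSIX threads. It is a sketch, not kernel code: a pthread mutex and condition variable stand in for ws_lock and ws_wait, a fixed POOL_LIMIT stands in for num_online_cpus(), and total_ws is kept under the mutex rather than being atomic, which simplifies the waiting logic. All names beyond the ones in the patch (struct ws, POOL_LIMIT, pool) are made up for the example.

/*
 * Minimal userspace analogy of the btrfs workspace pool -- NOT kernel
 * code.  Names mirror the patched members (idle_ws, free_ws, total_ws,
 * ws_lock, ws_wait).  Compile with:  cc -pthread pool.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_LIMIT 4			/* stands in for num_online_cpus() */

struct ws { struct ws *next; };		/* real code: per-type buffers */

static struct {
	struct ws *idle_ws;		/* singly linked idle list */
	pthread_mutex_t ws_lock;	/* protects idle_ws and free_ws */
	int free_ws;			/* number of free workspaces */
	int total_ws;			/* total allocated workspaces */
	pthread_cond_t ws_wait;		/* waiters for a free workspace */
} pool = { NULL, PTHREAD_MUTEX_INITIALIZER, 0, 0, PTHREAD_COND_INITIALIZER };

static struct ws *find_workspace(void)
{
	struct ws *w;

	pthread_mutex_lock(&pool.ws_lock);
again:
	if (pool.idle_ws) {			/* fast path: reuse one */
		w = pool.idle_ws;
		pool.idle_ws = w->next;
		pool.free_ws--;
		pthread_mutex_unlock(&pool.ws_lock);
		return w;
	}
	if (pool.total_ws > POOL_LIMIT) {	/* too many: sleep, retry */
		pthread_cond_wait(&pool.ws_wait, &pool.ws_lock);
		goto again;			/* lock is held again here */
	}
	pool.total_ws++;			/* allocate a new workspace */
	pthread_mutex_unlock(&pool.ws_lock);

	w = malloc(sizeof(*w));
	if (!w) {				/* mirrors the IS_ERR path */
		pthread_mutex_lock(&pool.ws_lock);
		pool.total_ws--;
		pthread_mutex_unlock(&pool.ws_lock);
		pthread_cond_signal(&pool.ws_wait);
	}
	return w;
}

static void free_workspace(struct ws *w)
{
	pthread_mutex_lock(&pool.ws_lock);
	if (pool.free_ws < POOL_LIMIT) {	/* cache it for reuse */
		w->next = pool.idle_ws;
		pool.idle_ws = w;
		pool.free_ws++;
	} else {				/* over the limit: drop it */
		pool.total_ws--;
		free(w);
	}
	pthread_mutex_unlock(&pool.ws_lock);
	pthread_cond_signal(&pool.ws_wait);	/* wake one waiter */
}

int main(void)
{
	struct ws *w = find_workspace();

	if (w)
		free_workspace(w);
	printf("total_ws=%d free_ws=%d\n", pool.total_ws, pool.free_ws);
	return 0;
}

The shape matches the patched code: reuse an idle workspace if one exists, allocate while under the limit, otherwise wait; on release, cache up to the limit and wake a waiter.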