diff options
author | Qu Wenruo <quwenruo@cn.fujitsu.com> | 2015-08-19 21:30:39 -0400 |
---|---|---|
committer | Chris Mason <clm@fb.com> | 2015-08-31 14:46:40 -0400 |
commit | c6dd6ea55758cf403bdc07a51a06c2a1d474f906 (patch) | |
tree | c9e8d9f30c65e3083fe36ac91bb981546b5af85d | |
parent | 943c6e9925d90dc80207322b5799d95fb90ffec0 (diff) |
btrfs: async_thread: Fix workqueue 'max_active' value when initializing
At initialization time, for a threshold-able workqueue, the max_active
of the underlying kernel workqueue should be 1, and grow only when it hits the threshold.
But due to the ambiguous naming, there was a 'max_active' for both the kernel
workqueue and the btrfs workqueue,
so the wrong value was passed at workqueue initialization.
This patch fixes it and, to avoid further misunderstanding, renames the
btrfs_workqueue members to 'current_active' and 'limit_active'.
Corresponding comments are also added for readability.
Reported-by: Alex Lyakas <alex.btrfs@zadarastorage.com>
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
-rw-r--r-- | fs/btrfs/async-thread.c | 57 | ||||
-rw-r--r-- | fs/btrfs/async-thread.h | 2 |
2 files changed, 35 insertions, 24 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 1ce06c849a86..3e36e4adc4a3 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
@@ -42,8 +42,14 @@ struct __btrfs_workqueue { | |||
42 | 42 | ||
43 | /* Thresholding related variants */ | 43 | /* Thresholding related variants */ |
44 | atomic_t pending; | 44 | atomic_t pending; |
45 | int max_active; | 45 | |
46 | int current_max; | 46 | /* Up limit of concurrency workers */ |
47 | int limit_active; | ||
48 | |||
49 | /* Current number of concurrency workers */ | ||
50 | int current_active; | ||
51 | |||
52 | /* Threshold to change current_active */ | ||
47 | int thresh; | 53 | int thresh; |
48 | unsigned int count; | 54 | unsigned int count; |
49 | spinlock_t thres_lock; | 55 | spinlock_t thres_lock; |
@@ -88,7 +94,7 @@ BTRFS_WORK_HELPER(scrubnc_helper); | |||
88 | BTRFS_WORK_HELPER(scrubparity_helper); | 94 | BTRFS_WORK_HELPER(scrubparity_helper); |
89 | 95 | ||
90 | static struct __btrfs_workqueue * | 96 | static struct __btrfs_workqueue * |
91 | __btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active, | 97 | __btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active, |
92 | int thresh) | 98 | int thresh) |
93 | { | 99 | { |
94 | struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); | 100 | struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); |
@@ -96,26 +102,31 @@ __btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active, | |||
96 | if (!ret) | 102 | if (!ret) |
97 | return NULL; | 103 | return NULL; |
98 | 104 | ||
99 | ret->max_active = max_active; | 105 | ret->limit_active = limit_active; |
100 | atomic_set(&ret->pending, 0); | 106 | atomic_set(&ret->pending, 0); |
101 | if (thresh == 0) | 107 | if (thresh == 0) |
102 | thresh = DFT_THRESHOLD; | 108 | thresh = DFT_THRESHOLD; |
103 | /* For low threshold, disabling threshold is a better choice */ | 109 | /* For low threshold, disabling threshold is a better choice */ |
104 | if (thresh < DFT_THRESHOLD) { | 110 | if (thresh < DFT_THRESHOLD) { |
105 | ret->current_max = max_active; | 111 | ret->current_active = limit_active; |
106 | ret->thresh = NO_THRESHOLD; | 112 | ret->thresh = NO_THRESHOLD; |
107 | } else { | 113 | } else { |
108 | ret->current_max = 1; | 114 | /* |
115 | * For threshold-able wq, let its concurrency grow on demand. | ||
116 | * Use minimal max_active at alloc time to reduce resource | ||
117 | * usage. | ||
118 | */ | ||
119 | ret->current_active = 1; | ||
109 | ret->thresh = thresh; | 120 | ret->thresh = thresh; |
110 | } | 121 | } |
111 | 122 | ||
112 | if (flags & WQ_HIGHPRI) | 123 | if (flags & WQ_HIGHPRI) |
113 | ret->normal_wq = alloc_workqueue("%s-%s-high", flags, | 124 | ret->normal_wq = alloc_workqueue("%s-%s-high", flags, |
114 | ret->max_active, | 125 | ret->current_active, "btrfs", |
115 | "btrfs", name); | 126 | name); |
116 | else | 127 | else |
117 | ret->normal_wq = alloc_workqueue("%s-%s", flags, | 128 | ret->normal_wq = alloc_workqueue("%s-%s", flags, |
118 | ret->max_active, "btrfs", | 129 | ret->current_active, "btrfs", |
119 | name); | 130 | name); |
120 | if (!ret->normal_wq) { | 131 | if (!ret->normal_wq) { |
121 | kfree(ret); | 132 | kfree(ret); |
@@ -134,7 +145,7 @@ __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq); | |||
134 | 145 | ||
135 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, | 146 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, |
136 | unsigned int flags, | 147 | unsigned int flags, |
137 | int max_active, | 148 | int limit_active, |
138 | int thresh) | 149 | int thresh) |
139 | { | 150 | { |
140 | struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); | 151 | struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); |
@@ -143,14 +154,14 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, | |||
143 | return NULL; | 154 | return NULL; |
144 | 155 | ||
145 | ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI, | 156 | ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI, |
146 | max_active, thresh); | 157 | limit_active, thresh); |
147 | if (!ret->normal) { | 158 | if (!ret->normal) { |
148 | kfree(ret); | 159 | kfree(ret); |
149 | return NULL; | 160 | return NULL; |
150 | } | 161 | } |
151 | 162 | ||
152 | if (flags & WQ_HIGHPRI) { | 163 | if (flags & WQ_HIGHPRI) { |
153 | ret->high = __btrfs_alloc_workqueue(name, flags, max_active, | 164 | ret->high = __btrfs_alloc_workqueue(name, flags, limit_active, |
154 | thresh); | 165 | thresh); |
155 | if (!ret->high) { | 166 | if (!ret->high) { |
156 | __btrfs_destroy_workqueue(ret->normal); | 167 | __btrfs_destroy_workqueue(ret->normal); |
@@ -180,7 +191,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) | |||
180 | */ | 191 | */ |
181 | static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) | 192 | static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) |
182 | { | 193 | { |
183 | int new_max_active; | 194 | int new_current_active; |
184 | long pending; | 195 | long pending; |
185 | int need_change = 0; | 196 | int need_change = 0; |
186 | 197 | ||
@@ -197,7 +208,7 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) | |||
197 | wq->count %= (wq->thresh / 4); | 208 | wq->count %= (wq->thresh / 4); |
198 | if (!wq->count) | 209 | if (!wq->count) |
199 | goto out; | 210 | goto out; |
200 | new_max_active = wq->current_max; | 211 | new_current_active = wq->current_active; |
201 | 212 | ||
202 | /* | 213 | /* |
203 | * pending may be changed later, but it's OK since we really | 214 | * pending may be changed later, but it's OK since we really |
@@ -205,19 +216,19 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) | |||
205 | */ | 216 | */ |
206 | pending = atomic_read(&wq->pending); | 217 | pending = atomic_read(&wq->pending); |
207 | if (pending > wq->thresh) | 218 | if (pending > wq->thresh) |
208 | new_max_active++; | 219 | new_current_active++; |
209 | if (pending < wq->thresh / 2) | 220 | if (pending < wq->thresh / 2) |
210 | new_max_active--; | 221 | new_current_active--; |
211 | new_max_active = clamp_val(new_max_active, 1, wq->max_active); | 222 | new_current_active = clamp_val(new_current_active, 1, wq->limit_active); |
212 | if (new_max_active != wq->current_max) { | 223 | if (new_current_active != wq->current_active) { |
213 | need_change = 1; | 224 | need_change = 1; |
214 | wq->current_max = new_max_active; | 225 | wq->current_active = new_current_active; |
215 | } | 226 | } |
216 | out: | 227 | out: |
217 | spin_unlock(&wq->thres_lock); | 228 | spin_unlock(&wq->thres_lock); |
218 | 229 | ||
219 | if (need_change) { | 230 | if (need_change) { |
220 | workqueue_set_max_active(wq->normal_wq, wq->current_max); | 231 | workqueue_set_max_active(wq->normal_wq, wq->current_active); |
221 | } | 232 | } |
222 | } | 233 | } |
223 | 234 | ||
@@ -351,13 +362,13 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq) | |||
351 | kfree(wq); | 362 | kfree(wq); |
352 | } | 363 | } |
353 | 364 | ||
354 | void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max) | 365 | void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) |
355 | { | 366 | { |
356 | if (!wq) | 367 | if (!wq) |
357 | return; | 368 | return; |
358 | wq->normal->max_active = max; | 369 | wq->normal->limit_active = limit_active; |
359 | if (wq->high) | 370 | if (wq->high) |
360 | wq->high->max_active = max; | 371 | wq->high->limit_active = limit_active; |
361 | } | 372 | } |
362 | 373 | ||
363 | void btrfs_set_work_high_priority(struct btrfs_work *work) | 374 | void btrfs_set_work_high_priority(struct btrfs_work *work) |
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index b0b093b6afec..ad4d0647d1a6 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h | |||
@@ -69,7 +69,7 @@ BTRFS_WORK_HELPER_PROTO(scrubparity_helper); | |||
69 | 69 | ||
70 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, | 70 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, |
71 | unsigned int flags, | 71 | unsigned int flags, |
72 | int max_active, | 72 | int limit_active, |
73 | int thresh); | 73 | int thresh); |
74 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, | 74 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, |
75 | btrfs_func_t func, | 75 | btrfs_func_t func, |