 fs/btrfs/async-thread.c | 100 ++++++++++++++++++++++++++++++++++---------------
 fs/btrfs/async-thread.h |   7 ++++---
 2 files changed, 73 insertions(+), 34 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8b9e2cf50606..8d4cc4679d51 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -34,6 +34,9 @@
  * One of these is allocated per thread.
  */
 struct btrfs_worker_thread {
+        /* pool we belong to */
+        struct btrfs_workers *workers;
+
         /* list of struct btrfs_work that are waiting for service */
         struct list_head pending;
 
@@ -51,9 +54,45 @@ struct btrfs_worker_thread {
 
         /* set to non-zero when this thread is already awake and kicking */
         int working;
+
+        /* are we currently idle */
+        int idle;
 };
 
 /*
+ * helper function to move a thread onto the idle list after it
+ * has finished some requests.
+ */
+static void check_idle_worker(struct btrfs_worker_thread *worker)
+{
+        if (!worker->idle && atomic_read(&worker->num_pending) <
+            worker->workers->idle_thresh / 2) {
+                unsigned long flags;
+                spin_lock_irqsave(&worker->workers->lock, flags);
+                worker->idle = 1;
+                list_move(&worker->worker_list, &worker->workers->idle_list);
+                spin_unlock_irqrestore(&worker->workers->lock, flags);
+        }
+}
+
+/*
+ * helper function to move a thread off the idle list after new
+ * pending work is added.
+ */
+static void check_busy_worker(struct btrfs_worker_thread *worker)
+{
+        if (worker->idle && atomic_read(&worker->num_pending) >=
+            worker->workers->idle_thresh) {
+                unsigned long flags;
+                spin_lock_irqsave(&worker->workers->lock, flags);
+                worker->idle = 0;
+                list_move_tail(&worker->worker_list,
+                               &worker->workers->worker_list);
+                spin_unlock_irqrestore(&worker->workers->lock, flags);
+        }
+}
+
+/*
  * main loop for servicing work items
  */
 static int worker_loop(void *arg)
@@ -76,6 +115,7 @@ static int worker_loop(void *arg)
 
                         atomic_dec(&worker->num_pending);
                         spin_lock_irq(&worker->lock);
+                        check_idle_worker(worker);
                 }
                 worker->working = 0;
                 if (freezing(current)) {
@@ -98,6 +138,7 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
         struct list_head *cur;
         struct btrfs_worker_thread *worker;
 
+        list_splice_init(&workers->idle_list, &workers->worker_list);
         while(!list_empty(&workers->worker_list)) {
                 cur = workers->worker_list.next;
                 worker = list_entry(cur, struct btrfs_worker_thread,
@@ -116,9 +157,10 @@ void btrfs_init_workers(struct btrfs_workers *workers, int max)
 {
         workers->num_workers = 0;
         INIT_LIST_HEAD(&workers->worker_list);
-        workers->last = NULL;
+        INIT_LIST_HEAD(&workers->idle_list);
         spin_lock_init(&workers->lock);
         workers->max_workers = max;
+        workers->idle_thresh = 64;
 }
 
 /*
@@ -143,14 +185,14 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
                 spin_lock_init(&worker->lock);
                 atomic_set(&worker->num_pending, 0);
                 worker->task = kthread_run(worker_loop, worker, "btrfs");
+                worker->workers = workers;
                 if (IS_ERR(worker->task)) {
                         ret = PTR_ERR(worker->task);
                         goto fail;
                 }
 
                 spin_lock_irq(&workers->lock);
-                list_add_tail(&worker->worker_list, &workers->worker_list);
-                workers->last = worker;
+                list_add_tail(&worker->worker_list, &workers->idle_list);
                 workers->num_workers++;
                 spin_unlock_irq(&workers->lock);
         }
@@ -169,42 +211,30 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 {
         struct btrfs_worker_thread *worker;
         struct list_head *next;
-        struct list_head *start;
         int enforce_min = workers->num_workers < workers->max_workers;
 
-        /* start with the last thread if it isn't busy */
-        worker = workers->last;
-        if (atomic_read(&worker->num_pending) < 64)
-                goto done;
-
-        next = worker->worker_list.next;
-        start = &worker->worker_list;
-
         /*
-         * check all the workers for someone that is bored.  FIXME, do
-         * something smart here
+         * if we find an idle thread, don't move it to the end of the
+         * idle list.  This improves the chance that the next submission
+         * will reuse the same thread, and maybe catch it while it is still
+         * working
          */
-        while(next != start) {
-                if (next == &workers->worker_list) {
-                        next = workers->worker_list.next;
-                        continue;
-                }
+        if (!list_empty(&workers->idle_list)) {
+                next = workers->idle_list.next;
                 worker = list_entry(next, struct btrfs_worker_thread,
                                     worker_list);
-                if (atomic_read(&worker->num_pending) < 64 || !enforce_min)
-                        goto done;
-                next = next->next;
+                return worker;
         }
+        if (enforce_min || list_empty(&workers->worker_list))
+                return NULL;
+
         /*
-         * nobody was bored, if we're already at the max thread count,
-         * use the last thread
+         * if we pick a busy task, move the task to the end of the list.
+         * hopefully this will keep things somewhat evenly balanced
          */
-        if (!enforce_min || atomic_read(&workers->last->num_pending) < 64) {
-                return workers->last;
-        }
-        return NULL;
-done:
-        workers->last = worker;
+        next = workers->worker_list.next;
+        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
+        list_move_tail(next, &workers->worker_list);
         return worker;
 }
 
@@ -221,11 +251,17 @@ again:
         if (!worker) {
                 spin_lock_irqsave(&workers->lock, flags);
                 if (workers->num_workers >= workers->max_workers) {
+                        struct list_head *fallback = NULL;
                         /*
                          * we have failed to find any workers, just
                          * return the force one
                          */
-                        worker = list_entry(workers->worker_list.next,
+                        if (!list_empty(&workers->worker_list))
+                                fallback = workers->worker_list.next;
+                        if (!list_empty(&workers->idle_list))
+                                fallback = workers->idle_list.next;
+                        BUG_ON(!fallback);
+                        worker = list_entry(fallback,
                                             struct btrfs_worker_thread, worker_list);
                         spin_unlock_irqrestore(&workers->lock, flags);
                 } else {
@@ -254,6 +290,7 @@ int btrfs_requeue_work(struct btrfs_work *work)
         spin_lock_irqsave(&worker->lock, flags);
         atomic_inc(&worker->num_pending);
         list_add_tail(&work->list, &worker->pending);
+        check_busy_worker(worker);
         spin_unlock_irqrestore(&worker->lock, flags);
 out:
         return 0;
@@ -276,6 +313,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
         spin_lock_irqsave(&worker->lock, flags);
         atomic_inc(&worker->num_pending);
+        check_busy_worker(worker);
         list_add_tail(&work->list, &worker->pending);
 
         /*
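
The core of the change is the hysteresis between check_idle_worker() and check_busy_worker(): a thread moves onto the idle list only once its backlog drops below idle_thresh / 2, but moves back to the busy list as soon as it reaches idle_thresh, so a worker hovering near the boundary does not ping-pong between lists on every queue and dequeue. Below is a minimal userspace sketch of just that policy; the names (toy_worker, check_idle, check_busy) are illustrative, not the kernel API, and the kernel's locking and list handling are deliberately omitted.

```c
#include <stdio.h>

/* toy model of one worker; mirrors num_pending/idle from the patch */
struct toy_worker {
        int num_pending;
        int idle;
};

static const int idle_thresh = 64;      /* default set in btrfs_init_workers() */

/* becomes idle only once the backlog falls below half the threshold */
static void check_idle(struct toy_worker *w)
{
        if (!w->idle && w->num_pending < idle_thresh / 2)
                w->idle = 1;
}

/* becomes busy again only at the full threshold */
static void check_busy(struct toy_worker *w)
{
        if (w->idle && w->num_pending >= idle_thresh)
                w->idle = 0;
}

int main(void)
{
        struct toy_worker w = { .num_pending = 0, .idle = 1 };
        int i;

        for (i = 0; i < 100; i++) {     /* queue 100 items */
                w.num_pending++;
                check_busy(&w);
        }
        printf("after 100 queued: idle=%d\n", w.idle);  /* prints idle=0 */

        while (w.num_pending > 31) {    /* drain below idle_thresh / 2 */
                w.num_pending--;
                check_idle(&w);
        }
        printf("after draining to %d: idle=%d\n", w.num_pending, w.idle);
        return 0;
}
```

The gap between the two thresholds (32 vs. 64 with the default) is what keeps a worker from flapping between the lists while its queue depth oscillates around a single value.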
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 52fc9da0f9e7..3436ff897597 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -60,11 +60,12 @@ struct btrfs_workers {
         /* max number of workers allowed. changed by btrfs_start_workers */
         int max_workers;
 
+        /* once a worker has this many requests or fewer, it is idle */
+        int idle_thresh;
+
         /* list with all the work threads */
         struct list_head worker_list;
-
-        /* the last worker thread to have something queued */
-        struct btrfs_worker_thread *last;
+        struct list_head idle_list;
 
         /* lock for finding the next worker thread to queue on */
         spinlock_t lock;
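
Taken together with the header change, worker selection now walks two lists instead of chasing a single workers->last pointer. The sketch below models the next_worker() policy in userspace under heavy simplification (arrays instead of the kernel's intrusive lists, no locking, no thread creation); pick_worker() and the surrounding names are hypothetical, not part of the btrfs API.

```c
#include <stdio.h>

#define NWORKERS 4

/* toy "lists": head of each is index 0; the real code uses struct list_head */
static int busy[NWORKERS], nbusy;
static int idle[NWORKERS], nidle;

static int pick_worker(void)
{
        int id, i;

        /* prefer an idle thread, and do NOT rotate the idle list:
         * back-to-back submissions keep hitting the same thread */
        if (nidle > 0)
                return idle[0];

        /* no threads available: the caller would spawn one here */
        if (nbusy == 0)
                return -1;

        /* all busy: take the head and rotate it to the tail,
         * like list_move_tail() in the patch, for round robin */
        id = busy[0];
        for (i = 0; i + 1 < nbusy; i++)
                busy[i] = busy[i + 1];
        busy[nbusy - 1] = id;
        return id;
}

int main(void)
{
        int i;

        nbusy = NWORKERS;
        for (i = 0; i < NWORKERS; i++)
                busy[i] = i;

        /* no idle threads: submissions rotate 0,1,2,3,0,1 */
        for (i = 0; i < 6; i++)
                printf("submission %d -> worker %d\n", i, pick_worker());

        /* once worker 2 is idle it absorbs every submission
         * (the real code also unlinks it from the busy list) */
        idle[nidle++] = 2;
        for (i = 0; i < 3; i++)
                printf("submission %d -> worker %d\n", i + 6, pick_worker());
        return 0;
}
```

The asymmetry is deliberate: reusing one idle thread keeps it cache-warm and lets small bursts batch onto it, while round-robin rotation only matters once every thread already has a deep queue. Note also that next_worker() returns NULL whenever nothing is idle and the pool is still below max_workers, forcing the caller to spawn a fresh thread instead of queueing behind a busy one.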