author      Felix Blyakher <felixb@sgi.com>          2009-06-10 18:07:47 -0400
committer   Felix Blyakher <felixb@sgi.com>          2009-06-10 18:07:47 -0400
commit      4e73e0eb633f8a1b5cbf20e7f42c6dbfec1d1ca7 (patch)
tree        0cea46e43f0625244c3d06a71d6559e5ec5419ca /fs/btrfs/async-thread.c
parent      4156e735d3abde8e9243b5d22f7999dd3fffab2e (diff)
parent      07a2039b8eb0af4ff464efd3dfd95de5c02648c6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--   fs/btrfs/async-thread.c   67
1 file changed, 52 insertions, 15 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index c84ca1f5259a..502c3d61de62 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -20,12 +20,12 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/freezer.h>
-#include <linux/ftrace.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
 #define WORK_DONE_BIT 1
 #define WORK_ORDER_DONE_BIT 2
+#define WORK_HIGH_PRIO_BIT 3
 
 /*
  * container for the kthread task pointer and the list of pending work
@@ -37,6 +37,7 @@ struct btrfs_worker_thread {
 
 	/* list of struct btrfs_work that are waiting for service */
 	struct list_head pending;
+	struct list_head prio_pending;
 
 	/* list of worker threads from struct btrfs_workers */
 	struct list_head worker_list;
@@ -104,10 +105,16 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 
 	spin_lock_irqsave(&workers->lock, flags);
 
-	while (!list_empty(&workers->order_list)) {
-		work = list_entry(workers->order_list.next,
-				  struct btrfs_work, order_list);
-
+	while (1) {
+		if (!list_empty(&workers->prio_order_list)) {
+			work = list_entry(workers->prio_order_list.next,
+					  struct btrfs_work, order_list);
+		} else if (!list_empty(&workers->order_list)) {
+			work = list_entry(workers->order_list.next,
+					  struct btrfs_work, order_list);
+		} else {
+			break;
+		}
 		if (!test_bit(WORK_DONE_BIT, &work->flags))
 			break;
 
@@ -144,8 +151,14 @@ static int worker_loop(void *arg)
 	do {
 		spin_lock_irq(&worker->lock);
 again_locked:
-		while (!list_empty(&worker->pending)) {
-			cur = worker->pending.next;
+		while (1) {
+			if (!list_empty(&worker->prio_pending))
+				cur = worker->prio_pending.next;
+			else if (!list_empty(&worker->pending))
+				cur = worker->pending.next;
+			else
+				break;
+
 			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
 			clear_bit(WORK_QUEUED_BIT, &work->flags);
@@ -164,7 +177,6 @@ again_locked:
 
 			spin_lock_irq(&worker->lock);
 			check_idle_worker(worker);
-
 		}
 		if (freezing(current)) {
 			worker->working = 0;
@@ -179,7 +191,8 @@ again_locked:
 				 * jump_in?
 				 */
 				smp_mb();
-				if (!list_empty(&worker->pending))
+				if (!list_empty(&worker->pending) ||
+				    !list_empty(&worker->prio_pending))
 					continue;
 
 				/*
@@ -192,13 +205,18 @@ again_locked:
 				 */
 				schedule_timeout(1);
 				smp_mb();
-				if (!list_empty(&worker->pending))
+				if (!list_empty(&worker->pending) ||
+				    !list_empty(&worker->prio_pending))
 					continue;
 
+				if (kthread_should_stop())
+					break;
+
 				/* still no more work?, sleep for real */
 				spin_lock_irq(&worker->lock);
 				set_current_state(TASK_INTERRUPTIBLE);
-				if (!list_empty(&worker->pending))
+				if (!list_empty(&worker->pending) ||
+				    !list_empty(&worker->prio_pending))
 					goto again_locked;
 
 				/*
@@ -208,7 +226,8 @@ again_locked:
 				worker->working = 0;
 				spin_unlock_irq(&worker->lock);
 
-				schedule();
+				if (!kthread_should_stop())
+					schedule();
 			}
 			__set_current_state(TASK_RUNNING);
 		}
@@ -245,6 +264,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	INIT_LIST_HEAD(&workers->worker_list);
 	INIT_LIST_HEAD(&workers->idle_list);
 	INIT_LIST_HEAD(&workers->order_list);
+	INIT_LIST_HEAD(&workers->prio_order_list);
 	spin_lock_init(&workers->lock);
 	workers->max_workers = max;
 	workers->idle_thresh = 32;
@@ -270,6 +290,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 	}
 
 	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
 	INIT_LIST_HEAD(&worker->worker_list);
 	spin_lock_init(&worker->lock);
 	atomic_set(&worker->num_pending, 0);
@@ -393,7 +414,10 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		goto out;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	list_add_tail(&work->list, &worker->pending);
+	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
+		list_add_tail(&work->list, &worker->prio_pending);
+	else
+		list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
 
 	/* by definition we're busy, take ourselves off the idle
@@ -419,6 +443,11 @@ out:
 	return 0;
 }
 
+void btrfs_set_work_high_prio(struct btrfs_work *work)
+{
+	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
+}
+
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
@@ -435,7 +464,12 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	worker = find_worker(workers);
 	if (workers->ordered) {
 		spin_lock_irqsave(&workers->lock, flags);
-		list_add_tail(&work->order_list, &workers->order_list);
+		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
+			list_add_tail(&work->order_list,
+				      &workers->prio_order_list);
+		} else {
+			list_add_tail(&work->order_list, &workers->order_list);
+		}
 		spin_unlock_irqrestore(&workers->lock, flags);
 	} else {
 		INIT_LIST_HEAD(&work->order_list);
@@ -443,7 +477,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	spin_lock_irqsave(&worker->lock, flags);
 
-	list_add_tail(&work->list, &worker->pending);
+	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
+		list_add_tail(&work->list, &worker->prio_pending);
+	else
+		list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
 
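The changes above give each worker a second pending list (prio_pending) and each ordered pool a prio_order_list; work flagged with WORK_HIGH_PRIO_BIT is always picked before regular work. Callers opt in through the new btrfs_set_work_high_prio() helper before queueing. A minimal caller sketch follows, assuming the func callback member of struct btrfs_work from async-thread.h and an already-started btrfs_workers pool; the names my_work_fn and queue_urgent_work are illustrative and not part of this patch:

/* Hypothetical usage sketch of the new high-priority path (not from this patch). */
static void my_work_fn(struct btrfs_work *work)
{
	/* the actual async work runs here */
}

static void queue_urgent_work(struct btrfs_workers *workers,
			      struct btrfs_work *work)
{
	work->func = my_work_fn;
	work->flags = 0;

	/* new helper added by this patch: sets WORK_HIGH_PRIO_BIT in work->flags */
	btrfs_set_work_high_prio(work);

	/* lands on the worker's prio_pending list (and prio_order_list when ordered) */
	btrfs_queue_worker(workers, work);
}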