diff options
author | Tejun Heo <tj@kernel.org> | 2010-07-20 16:09:01 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2010-07-22 16:58:47 -0400 |
commit | 8af7c12436803291c90295259db23d371a7ad9cc (patch) | |
tree | 5e75360876ac5783a3e64bd35a1715847d90e9ce | |
parent | 8b8edefa2fffbff97f9eec8b70e78ae23abad1a0 (diff) |
fscache: convert operation to use workqueue instead of slow-work
Make fscache operations use only a workqueue instead of a combination of
workqueue and slow-work. FSCACHE_OP_SLOW is dropped and
FSCACHE_OP_FAST is renamed to FSCACHE_OP_ASYNC and uses the newly added
fscache_op_wq workqueue to execute op->processor().
fscache_operation_init_slow() is dropped and fscache_operation_init()
now takes the @processor argument directly.
* Unbound workqueue is used.
* fscache_retrieval_work() is no longer necessary as OP_ASYNC now does
the equivalent thing.
* sysctl fscache.operation_max_active added to control concurrency.
The default value is nr_cpus clamped between 2 and
WQ_UNBOUND_MAX_ACTIVE.
* debugfs support is dropped for now. Tracing API based debug
facility is planned to be added.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Howells <dhowells@redhat.com>
-rw-r--r-- | fs/cachefiles/rdwr.c | 4 | ||||
-rw-r--r-- | fs/fscache/internal.h | 1 | ||||
-rw-r--r-- | fs/fscache/main.c | 23 | ||||
-rw-r--r-- | fs/fscache/operation.c | 67 | ||||
-rw-r--r-- | fs/fscache/page.c | 36 | ||||
-rw-r--r-- | include/linux/fscache-cache.h | 37 |
6 files changed, 53 insertions, 115 deletions
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 0f0d41fbb03f..0e3c0924cc3a 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c | |||
@@ -422,7 +422,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, | |||
422 | shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; | 422 | shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; |
423 | 423 | ||
424 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; | 424 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; |
425 | op->op.flags |= FSCACHE_OP_FAST; | 425 | op->op.flags |= FSCACHE_OP_ASYNC; |
426 | op->op.processor = cachefiles_read_copier; | 426 | op->op.processor = cachefiles_read_copier; |
427 | 427 | ||
428 | pagevec_init(&pagevec, 0); | 428 | pagevec_init(&pagevec, 0); |
@@ -729,7 +729,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op, | |||
729 | pagevec_init(&pagevec, 0); | 729 | pagevec_init(&pagevec, 0); |
730 | 730 | ||
731 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; | 731 | op->op.flags &= FSCACHE_OP_KEEP_FLAGS; |
732 | op->op.flags |= FSCACHE_OP_FAST; | 732 | op->op.flags |= FSCACHE_OP_ASYNC; |
733 | op->op.processor = cachefiles_read_copier; | 733 | op->op.processor = cachefiles_read_copier; |
734 | 734 | ||
735 | INIT_LIST_HEAD(&backpages); | 735 | INIT_LIST_HEAD(&backpages); |
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 6e0b5fb25231..6a026441c5a6 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h | |||
@@ -83,6 +83,7 @@ extern unsigned fscache_defer_create; | |||
83 | extern unsigned fscache_debug; | 83 | extern unsigned fscache_debug; |
84 | extern struct kobject *fscache_root; | 84 | extern struct kobject *fscache_root; |
85 | extern struct workqueue_struct *fscache_object_wq; | 85 | extern struct workqueue_struct *fscache_object_wq; |
86 | extern struct workqueue_struct *fscache_op_wq; | ||
86 | DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); | 87 | DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); |
87 | 88 | ||
88 | static inline bool fscache_object_congested(void) | 89 | static inline bool fscache_object_congested(void) |
diff --git a/fs/fscache/main.c b/fs/fscache/main.c index bb8d4c35c7a2..44d13ddab2cc 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c | |||
@@ -42,11 +42,13 @@ MODULE_PARM_DESC(fscache_debug, | |||
42 | 42 | ||
43 | struct kobject *fscache_root; | 43 | struct kobject *fscache_root; |
44 | struct workqueue_struct *fscache_object_wq; | 44 | struct workqueue_struct *fscache_object_wq; |
45 | struct workqueue_struct *fscache_op_wq; | ||
45 | 46 | ||
46 | DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); | 47 | DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait); |
47 | 48 | ||
48 | /* these values serve as lower bounds, will be adjusted in fscache_init() */ | 49 | /* these values serve as lower bounds, will be adjusted in fscache_init() */ |
49 | static unsigned fscache_object_max_active = 4; | 50 | static unsigned fscache_object_max_active = 4; |
51 | static unsigned fscache_op_max_active = 2; | ||
50 | 52 | ||
51 | #ifdef CONFIG_SYSCTL | 53 | #ifdef CONFIG_SYSCTL |
52 | static struct ctl_table_header *fscache_sysctl_header; | 54 | static struct ctl_table_header *fscache_sysctl_header; |
@@ -74,6 +76,14 @@ ctl_table fscache_sysctls[] = { | |||
74 | .proc_handler = fscache_max_active_sysctl, | 76 | .proc_handler = fscache_max_active_sysctl, |
75 | .extra1 = &fscache_object_wq, | 77 | .extra1 = &fscache_object_wq, |
76 | }, | 78 | }, |
79 | { | ||
80 | .procname = "operation_max_active", | ||
81 | .data = &fscache_op_max_active, | ||
82 | .maxlen = sizeof(unsigned), | ||
83 | .mode = 0644, | ||
84 | .proc_handler = fscache_max_active_sysctl, | ||
85 | .extra1 = &fscache_op_wq, | ||
86 | }, | ||
77 | {} | 87 | {} |
78 | }; | 88 | }; |
79 | 89 | ||
@@ -110,6 +120,16 @@ static int __init fscache_init(void) | |||
110 | if (!fscache_object_wq) | 120 | if (!fscache_object_wq) |
111 | goto error_object_wq; | 121 | goto error_object_wq; |
112 | 122 | ||
123 | fscache_op_max_active = | ||
124 | clamp_val(fscache_object_max_active / 2, | ||
125 | fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE); | ||
126 | |||
127 | ret = -ENOMEM; | ||
128 | fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND, | ||
129 | fscache_op_max_active); | ||
130 | if (!fscache_op_wq) | ||
131 | goto error_op_wq; | ||
132 | |||
113 | for_each_possible_cpu(cpu) | 133 | for_each_possible_cpu(cpu) |
114 | init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu)); | 134 | init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu)); |
115 | 135 | ||
@@ -152,6 +172,8 @@ error_sysctl: | |||
152 | #endif | 172 | #endif |
153 | fscache_proc_cleanup(); | 173 | fscache_proc_cleanup(); |
154 | error_proc: | 174 | error_proc: |
175 | destroy_workqueue(fscache_op_wq); | ||
176 | error_op_wq: | ||
155 | destroy_workqueue(fscache_object_wq); | 177 | destroy_workqueue(fscache_object_wq); |
156 | error_object_wq: | 178 | error_object_wq: |
157 | slow_work_unregister_user(THIS_MODULE); | 179 | slow_work_unregister_user(THIS_MODULE); |
@@ -172,6 +194,7 @@ static void __exit fscache_exit(void) | |||
172 | kmem_cache_destroy(fscache_cookie_jar); | 194 | kmem_cache_destroy(fscache_cookie_jar); |
173 | unregister_sysctl_table(fscache_sysctl_header); | 195 | unregister_sysctl_table(fscache_sysctl_header); |
174 | fscache_proc_cleanup(); | 196 | fscache_proc_cleanup(); |
197 | destroy_workqueue(fscache_op_wq); | ||
175 | destroy_workqueue(fscache_object_wq); | 198 | destroy_workqueue(fscache_object_wq); |
176 | slow_work_unregister_user(THIS_MODULE); | 199 | slow_work_unregister_user(THIS_MODULE); |
177 | printk(KERN_NOTICE "FS-Cache: Unloaded\n"); | 200 | printk(KERN_NOTICE "FS-Cache: Unloaded\n"); |
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index f17cecafae44..b9f34eaede09 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c | |||
@@ -42,16 +42,12 @@ void fscache_enqueue_operation(struct fscache_operation *op) | |||
42 | 42 | ||
43 | fscache_stat(&fscache_n_op_enqueue); | 43 | fscache_stat(&fscache_n_op_enqueue); |
44 | switch (op->flags & FSCACHE_OP_TYPE) { | 44 | switch (op->flags & FSCACHE_OP_TYPE) { |
45 | case FSCACHE_OP_FAST: | 45 | case FSCACHE_OP_ASYNC: |
46 | _debug("queue fast"); | 46 | _debug("queue async"); |
47 | atomic_inc(&op->usage); | 47 | atomic_inc(&op->usage); |
48 | if (!schedule_work(&op->fast_work)) | 48 | if (!queue_work(fscache_op_wq, &op->work)) |
49 | fscache_put_operation(op); | 49 | fscache_put_operation(op); |
50 | break; | 50 | break; |
51 | case FSCACHE_OP_SLOW: | ||
52 | _debug("queue slow"); | ||
53 | slow_work_enqueue(&op->slow_work); | ||
54 | break; | ||
55 | case FSCACHE_OP_MYTHREAD: | 51 | case FSCACHE_OP_MYTHREAD: |
56 | _debug("queue for caller's attention"); | 52 | _debug("queue for caller's attention"); |
57 | break; | 53 | break; |
@@ -455,36 +451,13 @@ void fscache_operation_gc(struct work_struct *work) | |||
455 | } | 451 | } |
456 | 452 | ||
457 | /* | 453 | /* |
458 | * allow the slow work item processor to get a ref on an operation | 454 | * execute an operation using fs_op_wq to provide processing context - |
459 | */ | 455 | * the caller holds a ref to this object, so we don't need to hold one |
460 | static int fscache_op_get_ref(struct slow_work *work) | ||
461 | { | ||
462 | struct fscache_operation *op = | ||
463 | container_of(work, struct fscache_operation, slow_work); | ||
464 | |||
465 | atomic_inc(&op->usage); | ||
466 | return 0; | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * allow the slow work item processor to discard a ref on an operation | ||
471 | */ | ||
472 | static void fscache_op_put_ref(struct slow_work *work) | ||
473 | { | ||
474 | struct fscache_operation *op = | ||
475 | container_of(work, struct fscache_operation, slow_work); | ||
476 | |||
477 | fscache_put_operation(op); | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | * execute an operation using the slow thread pool to provide processing context | ||
482 | * - the caller holds a ref to this object, so we don't need to hold one | ||
483 | */ | 456 | */ |
484 | static void fscache_op_execute(struct slow_work *work) | 457 | void fscache_op_work_func(struct work_struct *work) |
485 | { | 458 | { |
486 | struct fscache_operation *op = | 459 | struct fscache_operation *op = |
487 | container_of(work, struct fscache_operation, slow_work); | 460 | container_of(work, struct fscache_operation, work); |
488 | unsigned long start; | 461 | unsigned long start; |
489 | 462 | ||
490 | _enter("{OBJ%x OP%x,%d}", | 463 | _enter("{OBJ%x OP%x,%d}", |
@@ -494,31 +467,7 @@ static void fscache_op_execute(struct slow_work *work) | |||
494 | start = jiffies; | 467 | start = jiffies; |
495 | op->processor(op); | 468 | op->processor(op); |
496 | fscache_hist(fscache_ops_histogram, start); | 469 | fscache_hist(fscache_ops_histogram, start); |
470 | fscache_put_operation(op); | ||
497 | 471 | ||
498 | _leave(""); | 472 | _leave(""); |
499 | } | 473 | } |
500 | |||
501 | /* | ||
502 | * describe an operation for slow-work debugging | ||
503 | */ | ||
504 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
505 | static void fscache_op_desc(struct slow_work *work, struct seq_file *m) | ||
506 | { | ||
507 | struct fscache_operation *op = | ||
508 | container_of(work, struct fscache_operation, slow_work); | ||
509 | |||
510 | seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx", | ||
511 | op->object->debug_id, op->debug_id, | ||
512 | op->name, op->state, op->flags); | ||
513 | } | ||
514 | #endif | ||
515 | |||
516 | const struct slow_work_ops fscache_op_slow_work_ops = { | ||
517 | .owner = THIS_MODULE, | ||
518 | .get_ref = fscache_op_get_ref, | ||
519 | .put_ref = fscache_op_put_ref, | ||
520 | .execute = fscache_op_execute, | ||
521 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
522 | .desc = fscache_op_desc, | ||
523 | #endif | ||
524 | }; | ||
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 723b889fd219..41c441c2058d 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -105,7 +105,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie, | |||
105 | 105 | ||
106 | page_busy: | 106 | page_busy: |
107 | /* we might want to wait here, but that could deadlock the allocator as | 107 | /* we might want to wait here, but that could deadlock the allocator as |
108 | * the slow-work threads writing to the cache may all end up sleeping | 108 | * the work threads writing to the cache may all end up sleeping |
109 | * on memory allocation */ | 109 | * on memory allocation */ |
110 | fscache_stat(&fscache_n_store_vmscan_busy); | 110 | fscache_stat(&fscache_n_store_vmscan_busy); |
111 | return false; | 111 | return false; |
@@ -188,9 +188,8 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
188 | return -ENOMEM; | 188 | return -ENOMEM; |
189 | } | 189 | } |
190 | 190 | ||
191 | fscache_operation_init(op, NULL); | 191 | fscache_operation_init(op, fscache_attr_changed_op, NULL); |
192 | fscache_operation_init_slow(op, fscache_attr_changed_op); | 192 | op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); |
193 | op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); | ||
194 | fscache_set_op_name(op, "Attr"); | 193 | fscache_set_op_name(op, "Attr"); |
195 | 194 | ||
196 | spin_lock(&cookie->lock); | 195 | spin_lock(&cookie->lock); |
@@ -218,24 +217,6 @@ nobufs: | |||
218 | EXPORT_SYMBOL(__fscache_attr_changed); | 217 | EXPORT_SYMBOL(__fscache_attr_changed); |
219 | 218 | ||
220 | /* | 219 | /* |
221 | * handle secondary execution given to a retrieval op on behalf of the | ||
222 | * cache | ||
223 | */ | ||
224 | static void fscache_retrieval_work(struct work_struct *work) | ||
225 | { | ||
226 | struct fscache_retrieval *op = | ||
227 | container_of(work, struct fscache_retrieval, op.fast_work); | ||
228 | unsigned long start; | ||
229 | |||
230 | _enter("{OP%x}", op->op.debug_id); | ||
231 | |||
232 | start = jiffies; | ||
233 | op->op.processor(&op->op); | ||
234 | fscache_hist(fscache_ops_histogram, start); | ||
235 | fscache_put_operation(&op->op); | ||
236 | } | ||
237 | |||
238 | /* | ||
239 | * release a retrieval op reference | 220 | * release a retrieval op reference |
240 | */ | 221 | */ |
241 | static void fscache_release_retrieval_op(struct fscache_operation *_op) | 222 | static void fscache_release_retrieval_op(struct fscache_operation *_op) |
@@ -269,13 +250,12 @@ static struct fscache_retrieval *fscache_alloc_retrieval( | |||
269 | return NULL; | 250 | return NULL; |
270 | } | 251 | } |
271 | 252 | ||
272 | fscache_operation_init(&op->op, fscache_release_retrieval_op); | 253 | fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op); |
273 | op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING); | 254 | op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING); |
274 | op->mapping = mapping; | 255 | op->mapping = mapping; |
275 | op->end_io_func = end_io_func; | 256 | op->end_io_func = end_io_func; |
276 | op->context = context; | 257 | op->context = context; |
277 | op->start_time = jiffies; | 258 | op->start_time = jiffies; |
278 | INIT_WORK(&op->op.fast_work, fscache_retrieval_work); | ||
279 | INIT_LIST_HEAD(&op->to_do); | 259 | INIT_LIST_HEAD(&op->to_do); |
280 | fscache_set_op_name(&op->op, "Retr"); | 260 | fscache_set_op_name(&op->op, "Retr"); |
281 | return op; | 261 | return op; |
@@ -795,9 +775,9 @@ int __fscache_write_page(struct fscache_cookie *cookie, | |||
795 | if (!op) | 775 | if (!op) |
796 | goto nomem; | 776 | goto nomem; |
797 | 777 | ||
798 | fscache_operation_init(&op->op, fscache_release_write_op); | 778 | fscache_operation_init(&op->op, fscache_write_op, |
799 | fscache_operation_init_slow(&op->op, fscache_write_op); | 779 | fscache_release_write_op); |
800 | op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING); | 780 | op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING); |
801 | fscache_set_op_name(&op->op, "Write1"); | 781 | fscache_set_op_name(&op->op, "Write1"); |
802 | 782 | ||
803 | ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); | 783 | ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); |
@@ -852,7 +832,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, | |||
852 | fscache_stat(&fscache_n_store_ops); | 832 | fscache_stat(&fscache_n_store_ops); |
853 | fscache_stat(&fscache_n_stores_ok); | 833 | fscache_stat(&fscache_n_stores_ok); |
854 | 834 | ||
855 | /* the slow work queue now carries its own ref on the object */ | 835 | /* the work queue now carries its own ref on the object */ |
856 | fscache_put_operation(&op->op); | 836 | fscache_put_operation(&op->op); |
857 | _leave(" = 0"); | 837 | _leave(" = 0"); |
858 | return 0; | 838 | return 0; |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 27c8df503152..17ed9c1dbfbe 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
@@ -77,18 +77,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op); | |||
77 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); | 77 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); |
78 | 78 | ||
79 | struct fscache_operation { | 79 | struct fscache_operation { |
80 | union { | 80 | struct work_struct work; /* record for async ops */ |
81 | struct work_struct fast_work; /* record for fast ops */ | ||
82 | struct slow_work slow_work; /* record for (very) slow ops */ | ||
83 | }; | ||
84 | struct list_head pend_link; /* link in object->pending_ops */ | 81 | struct list_head pend_link; /* link in object->pending_ops */ |
85 | struct fscache_object *object; /* object to be operated upon */ | 82 | struct fscache_object *object; /* object to be operated upon */ |
86 | 83 | ||
87 | unsigned long flags; | 84 | unsigned long flags; |
88 | #define FSCACHE_OP_TYPE 0x000f /* operation type */ | 85 | #define FSCACHE_OP_TYPE 0x000f /* operation type */ |
89 | #define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ | 86 | #define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ |
90 | #define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ | 87 | #define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ |
91 | #define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done be issuing thread, not pool */ | ||
92 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ | 88 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ |
93 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ | 89 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ |
94 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ | 90 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ |
@@ -106,7 +102,8 @@ struct fscache_operation { | |||
106 | /* operation releaser */ | 102 | /* operation releaser */ |
107 | fscache_operation_release_t release; | 103 | fscache_operation_release_t release; |
108 | 104 | ||
109 | #ifdef CONFIG_SLOW_WORK_DEBUG | 105 | #ifdef CONFIG_WORKQUEUE_DEBUGFS |
106 | struct work_struct put_work; /* work to delay operation put */ | ||
110 | const char *name; /* operation name */ | 107 | const char *name; /* operation name */ |
111 | const char *state; /* operation state */ | 108 | const char *state; /* operation state */ |
112 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) | 109 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) |
@@ -118,7 +115,7 @@ struct fscache_operation { | |||
118 | }; | 115 | }; |
119 | 116 | ||
120 | extern atomic_t fscache_op_debug_id; | 117 | extern atomic_t fscache_op_debug_id; |
121 | extern const struct slow_work_ops fscache_op_slow_work_ops; | 118 | extern void fscache_op_work_func(struct work_struct *work); |
122 | 119 | ||
123 | extern void fscache_enqueue_operation(struct fscache_operation *); | 120 | extern void fscache_enqueue_operation(struct fscache_operation *); |
124 | extern void fscache_put_operation(struct fscache_operation *); | 121 | extern void fscache_put_operation(struct fscache_operation *); |
@@ -129,33 +126,21 @@ extern void fscache_put_operation(struct fscache_operation *); | |||
129 | * @release: The release function to assign | 126 | * @release: The release function to assign |
130 | * | 127 | * |
131 | * Do basic initialisation of an operation. The caller must still set flags, | 128 | * Do basic initialisation of an operation. The caller must still set flags, |
132 | * object, either fast_work or slow_work if necessary, and processor if needed. | 129 | * object and processor if needed. |
133 | */ | 130 | */ |
134 | static inline void fscache_operation_init(struct fscache_operation *op, | 131 | static inline void fscache_operation_init(struct fscache_operation *op, |
135 | fscache_operation_release_t release) | 132 | fscache_operation_processor_t processor, |
133 | fscache_operation_release_t release) | ||
136 | { | 134 | { |
135 | INIT_WORK(&op->work, fscache_op_work_func); | ||
137 | atomic_set(&op->usage, 1); | 136 | atomic_set(&op->usage, 1); |
138 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | 137 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); |
138 | op->processor = processor; | ||
139 | op->release = release; | 139 | op->release = release; |
140 | INIT_LIST_HEAD(&op->pend_link); | 140 | INIT_LIST_HEAD(&op->pend_link); |
141 | fscache_set_op_state(op, "Init"); | 141 | fscache_set_op_state(op, "Init"); |
142 | } | 142 | } |
143 | 143 | ||
144 | /** | ||
145 | * fscache_operation_init_slow - Do additional initialisation of a slow op | ||
146 | * @op: The operation to initialise | ||
147 | * @processor: The processor function to assign | ||
148 | * | ||
149 | * Do additional initialisation of an operation as required for slow work. | ||
150 | */ | ||
151 | static inline | ||
152 | void fscache_operation_init_slow(struct fscache_operation *op, | ||
153 | fscache_operation_processor_t processor) | ||
154 | { | ||
155 | op->processor = processor; | ||
156 | slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); | ||
157 | } | ||
158 | |||
159 | /* | 144 | /* |
160 | * data read operation | 145 | * data read operation |
161 | */ | 146 | */ |