about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2012-12-20 16:52:35 -0500
committerDavid Howells <dhowells@redhat.com>2012-12-20 16:58:26 -0500
commit9f10523f891928330b7529da54c1a3cc65180b1a (patch)
tree014731e89d44d1ca86cc665f4d39d8d2c25c69bf /fs
parentef46ed888efb1e8da33be5d33c9b54476289a43b (diff)
FS-Cache: Fix operation state management and accounting
Fix the state management of internal fscache operations and the accounting of what operations are in what states. This is done by: (1) Give struct fscache_operation a enum variable that directly represents the state it's currently in, rather than spreading this knowledge over a bunch of flags, who's processing the operation at the moment and whether it is queued or not. This makes it easier to write assertions to check the state at various points and to prevent invalid state transitions. (2) Add an 'operation complete' state and supply a function to indicate the completion of an operation (fscache_op_complete()) and make things call it. The final call to fscache_put_operation() can then check that an op in the appropriate state (complete or cancelled). (3) Adjust the use of object->n_ops, ->n_in_progress, ->n_exclusive to better govern the state of an object: (a) The ->n_ops is now the number of extant operations on the object and is now decremented by fscache_put_operation() only. (b) The ->n_in_progress is simply the number of objects that have been taken off of the object's pending queue for the purposes of being run. This is decremented by fscache_op_complete() only. (c) The ->n_exclusive is the number of exclusive ops that have been submitted and queued or are in progress. It is decremented by fscache_op_complete() and by fscache_cancel_op(). fscache_put_operation() and fscache_operation_gc() now no longer try to clean up ->n_exclusive and ->n_in_progress. That was leading to double decrements against fscache_cancel_op(). fscache_cancel_op() now no longer decrements ->n_ops. That was leading to double decrements against fscache_put_operation(). fscache_submit_exclusive_op() now decides whether it has to queue an op based on ->n_in_progress being > 0 rather than ->n_ops > 0 as the latter will persist in being true even after all preceding operations have been cancelled or completed. 
Furthermore, if an object is active and there are runnable ops against it, there must be at least one op running. (4) Add a remaining-pages counter (n_pages) to struct fscache_retrieval and provide a function to record completion of the pages as they complete. When n_pages reaches 0, the operation is deemed to be complete and fscache_op_complete() is called. Add calls to fscache_retrieval_complete() anywhere we've finished with a page we've been given to read or allocate for. This includes places where we just return pages to the netfs for reading from the server and where accessing the cache fails and we discard the proposed netfs page. The bugs in the unfixed state management manifest themselves as oopses like the following where the operation completion gets out of sync with return of the cookie by the netfs. This is possible because the cache unlocks and returns all the netfs pages before recording its completion - which means that there's nothing to stop the netfs discarding them and returning the cookie. FS-Cache: Cookie 'NFS.fh' still has outstanding reads ------------[ cut here ]------------ kernel BUG at fs/fscache/cookie.c:519! 
invalid opcode: 0000 [#1] SMP CPU 1 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 400, comm: kswapd0 Not tainted 3.1.0-rc7-fsdevel+ #1090 /DG965RY RIP: 0010:[<ffffffffa007050a>] [<ffffffffa007050a>] __fscache_relinquish_cookie+0x170/0x343 [fscache] RSP: 0018:ffff8800368cfb00 EFLAGS: 00010282 RAX: 000000000000003c RBX: ffff880023cc8790 RCX: 0000000000000000 RDX: 0000000000002f2e RSI: 0000000000000001 RDI: ffffffff813ab86c RBP: ffff8800368cfb50 R08: 0000000000000002 R09: 0000000000000000 R10: ffff88003a1b7890 R11: ffff88001df6e488 R12: ffff880023d8ed98 R13: ffff880023cc8798 R14: 0000000000000004 R15: ffff88003b8bf370 FS: 0000000000000000(0000) GS:ffff88003bd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000008ba008 CR3: 0000000023d93000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process kswapd0 (pid: 400, threadinfo ffff8800368ce000, task ffff88003b8bf040) Stack: ffff88003b8bf040 ffff88001df6e528 ffff88001df6e528 ffffffffa00b46b0 ffff88003b8bf040 ffff88001df6e488 ffff88001df6e620 ffffffffa00b46b0 ffff88001ebd04c8 0000000000000004 ffff8800368cfb70 ffffffffa00b2c91 Call Trace: [<ffffffffa00b2c91>] nfs_fscache_release_inode_cookie+0x3b/0x47 [nfs] [<ffffffffa008f25f>] nfs_clear_inode+0x3c/0x41 [nfs] [<ffffffffa0090df1>] nfs4_evict_inode+0x2f/0x33 [nfs] [<ffffffff810d8d47>] evict+0xa1/0x15c [<ffffffff810d8e2e>] dispose_list+0x2c/0x38 [<ffffffff810d9ebd>] prune_icache_sb+0x28c/0x29b [<ffffffff810c56b7>] prune_super+0xd5/0x140 [<ffffffff8109b615>] shrink_slab+0x102/0x1ab [<ffffffff8109d690>] balance_pgdat+0x2f2/0x595 [<ffffffff8103e009>] ? process_timeout+0xb/0xb [<ffffffff8109dba3>] kswapd+0x270/0x289 [<ffffffff8104c5ea>] ? __init_waitqueue_head+0x46/0x46 [<ffffffff8109d933>] ? 
balance_pgdat+0x595/0x595 [<ffffffff8104bf7a>] kthread+0x7f/0x87 [<ffffffff813ad6b4>] kernel_thread_helper+0x4/0x10 [<ffffffff81026b98>] ? finish_task_switch+0x45/0xc0 [<ffffffff813abcdd>] ? retint_restore_args+0xe/0xe [<ffffffff8104befb>] ? __init_kthread_worker+0x53/0x53 [<ffffffff813ad6b0>] ? gs_change+0xb/0xb Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/cachefiles/rdwr.c31
-rw-r--r--fs/fscache/object.c2
-rw-r--r--fs/fscache/operation.c91
-rw-r--r--fs/fscache/page.c25
4 files changed, 106 insertions, 43 deletions
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index bf123d9c3206..93a0815e0498 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -197,6 +197,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
197 197
198 fscache_end_io(op, monitor->netfs_page, error); 198 fscache_end_io(op, monitor->netfs_page, error);
199 page_cache_release(monitor->netfs_page); 199 page_cache_release(monitor->netfs_page);
200 fscache_retrieval_complete(op, 1);
200 fscache_put_retrieval(op); 201 fscache_put_retrieval(op);
201 kfree(monitor); 202 kfree(monitor);
202 203
@@ -339,6 +340,7 @@ backing_page_already_uptodate:
339 340
340 copy_highpage(netpage, backpage); 341 copy_highpage(netpage, backpage);
341 fscache_end_io(op, netpage, 0); 342 fscache_end_io(op, netpage, 0);
343 fscache_retrieval_complete(op, 1);
342 344
343success: 345success:
344 _debug("success"); 346 _debug("success");
@@ -360,6 +362,7 @@ read_error:
360 goto out; 362 goto out;
361io_error: 363io_error:
362 cachefiles_io_error_obj(object, "Page read error on backing file"); 364 cachefiles_io_error_obj(object, "Page read error on backing file");
365 fscache_retrieval_complete(op, 1);
363 ret = -ENOBUFS; 366 ret = -ENOBUFS;
364 goto out; 367 goto out;
365 368
@@ -369,6 +372,7 @@ nomem_monitor:
369 fscache_put_retrieval(monitor->op); 372 fscache_put_retrieval(monitor->op);
370 kfree(monitor); 373 kfree(monitor);
371nomem: 374nomem:
375 fscache_retrieval_complete(op, 1);
372 _leave(" = -ENOMEM"); 376 _leave(" = -ENOMEM");
373 return -ENOMEM; 377 return -ENOMEM;
374} 378}
@@ -407,7 +411,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
407 _enter("{%p},{%lx},,,", object, page->index); 411 _enter("{%p},{%lx},,,", object, page->index);
408 412
409 if (!object->backer) 413 if (!object->backer)
410 return -ENOBUFS; 414 goto enobufs;
411 415
412 inode = object->backer->d_inode; 416 inode = object->backer->d_inode;
413 ASSERT(S_ISREG(inode->i_mode)); 417 ASSERT(S_ISREG(inode->i_mode));
@@ -416,7 +420,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
416 420
417 /* calculate the shift required to use bmap */ 421 /* calculate the shift required to use bmap */
418 if (inode->i_sb->s_blocksize > PAGE_SIZE) 422 if (inode->i_sb->s_blocksize > PAGE_SIZE)
419 return -ENOBUFS; 423 goto enobufs;
420 424
421 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 425 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
422 426
@@ -448,13 +452,19 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
448 } else if (cachefiles_has_space(cache, 0, 1) == 0) { 452 } else if (cachefiles_has_space(cache, 0, 1) == 0) {
449 /* there's space in the cache we can use */ 453 /* there's space in the cache we can use */
450 fscache_mark_page_cached(op, page); 454 fscache_mark_page_cached(op, page);
455 fscache_retrieval_complete(op, 1);
451 ret = -ENODATA; 456 ret = -ENODATA;
452 } else { 457 } else {
453 ret = -ENOBUFS; 458 goto enobufs;
454 } 459 }
455 460
456 _leave(" = %d", ret); 461 _leave(" = %d", ret);
457 return ret; 462 return ret;
463
464enobufs:
465 fscache_retrieval_complete(op, 1);
466 _leave(" = -ENOBUFS");
467 return -ENOBUFS;
458} 468}
459 469
460/* 470/*
@@ -632,6 +642,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
632 642
633 /* the netpage is unlocked and marked up to date here */ 643 /* the netpage is unlocked and marked up to date here */
634 fscache_end_io(op, netpage, 0); 644 fscache_end_io(op, netpage, 0);
645 fscache_retrieval_complete(op, 1);
635 page_cache_release(netpage); 646 page_cache_release(netpage);
636 netpage = NULL; 647 netpage = NULL;
637 continue; 648 continue;
@@ -659,6 +670,7 @@ out:
659 list_for_each_entry_safe(netpage, _n, list, lru) { 670 list_for_each_entry_safe(netpage, _n, list, lru) {
660 list_del(&netpage->lru); 671 list_del(&netpage->lru);
661 page_cache_release(netpage); 672 page_cache_release(netpage);
673 fscache_retrieval_complete(op, 1);
662 } 674 }
663 675
664 _leave(" = %d", ret); 676 _leave(" = %d", ret);
@@ -707,7 +719,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
707 *nr_pages); 719 *nr_pages);
708 720
709 if (!object->backer) 721 if (!object->backer)
710 return -ENOBUFS; 722 goto all_enobufs;
711 723
712 space = 1; 724 space = 1;
713 if (cachefiles_has_space(cache, 0, *nr_pages) < 0) 725 if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
@@ -720,7 +732,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
720 732
721 /* calculate the shift required to use bmap */ 733 /* calculate the shift required to use bmap */
722 if (inode->i_sb->s_blocksize > PAGE_SIZE) 734 if (inode->i_sb->s_blocksize > PAGE_SIZE)
723 return -ENOBUFS; 735 goto all_enobufs;
724 736
725 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 737 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
726 738
@@ -760,7 +772,10 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
760 nrbackpages++; 772 nrbackpages++;
761 } else if (space && pagevec_add(&pagevec, page) == 0) { 773 } else if (space && pagevec_add(&pagevec, page) == 0) {
762 fscache_mark_pages_cached(op, &pagevec); 774 fscache_mark_pages_cached(op, &pagevec);
775 fscache_retrieval_complete(op, 1);
763 ret = -ENODATA; 776 ret = -ENODATA;
777 } else {
778 fscache_retrieval_complete(op, 1);
764 } 779 }
765 } 780 }
766 781
@@ -781,6 +796,10 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
781 _leave(" = %d [nr=%u%s]", 796 _leave(" = %d [nr=%u%s]",
782 ret, *nr_pages, list_empty(pages) ? " empty" : ""); 797 ret, *nr_pages, list_empty(pages) ? " empty" : "");
783 return ret; 798 return ret;
799
800all_enobufs:
801 fscache_retrieval_complete(op, *nr_pages);
802 return -ENOBUFS;
784} 803}
785 804
786/* 805/*
@@ -815,6 +834,7 @@ int cachefiles_allocate_page(struct fscache_retrieval *op,
815 else 834 else
816 ret = -ENOBUFS; 835 ret = -ENOBUFS;
817 836
837 fscache_retrieval_complete(op, 1);
818 _leave(" = %d", ret); 838 _leave(" = %d", ret);
819 return ret; 839 return ret;
820} 840}
@@ -864,6 +884,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op,
864 ret = -ENOBUFS; 884 ret = -ENOBUFS;
865 } 885 }
866 886
887 fscache_retrieval_complete(op, *nr_pages);
867 _leave(" = %d", ret); 888 _leave(" = %d", ret);
868 return ret; 889 return ret;
869} 890}
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index b6b897c550ac..773bc798a416 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -587,8 +587,6 @@ static void fscache_object_available(struct fscache_object *object)
587 if (object->n_in_progress == 0) { 587 if (object->n_in_progress == 0) {
588 if (object->n_ops > 0) { 588 if (object->n_ops > 0) {
589 ASSERTCMP(object->n_ops, >=, object->n_obj_ops); 589 ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
590 ASSERTIF(object->n_ops > object->n_obj_ops,
591 !list_empty(&object->pending_ops));
592 fscache_start_operations(object); 590 fscache_start_operations(object);
593 } else { 591 } else {
594 ASSERT(list_empty(&object->pending_ops)); 592 ASSERT(list_empty(&object->pending_ops));
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index c857ab824d6e..748f9553c2cb 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -37,6 +37,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
37 ASSERT(op->processor != NULL); 37 ASSERT(op->processor != NULL);
38 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); 38 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39 ASSERTCMP(atomic_read(&op->usage), >, 0); 39 ASSERTCMP(atomic_read(&op->usage), >, 0);
40 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
40 41
41 fscache_stat(&fscache_n_op_enqueue); 42 fscache_stat(&fscache_n_op_enqueue);
42 switch (op->flags & FSCACHE_OP_TYPE) { 43 switch (op->flags & FSCACHE_OP_TYPE) {
@@ -64,6 +65,9 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
64static void fscache_run_op(struct fscache_object *object, 65static void fscache_run_op(struct fscache_object *object,
65 struct fscache_operation *op) 66 struct fscache_operation *op)
66{ 67{
68 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
69
70 op->state = FSCACHE_OP_ST_IN_PROGRESS;
67 object->n_in_progress++; 71 object->n_in_progress++;
68 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) 72 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
69 wake_up_bit(&op->flags, FSCACHE_OP_WAITING); 73 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -80,22 +84,23 @@ static void fscache_run_op(struct fscache_object *object,
80int fscache_submit_exclusive_op(struct fscache_object *object, 84int fscache_submit_exclusive_op(struct fscache_object *object,
81 struct fscache_operation *op) 85 struct fscache_operation *op)
82{ 86{
83 int ret;
84
85 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); 87 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
86 88
89 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
90 ASSERTCMP(atomic_read(&op->usage), >, 0);
91
87 spin_lock(&object->lock); 92 spin_lock(&object->lock);
88 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 93 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
89 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 94 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
90 ASSERT(list_empty(&op->pend_link)); 95 ASSERT(list_empty(&op->pend_link));
91 96
92 ret = -ENOBUFS; 97 op->state = FSCACHE_OP_ST_PENDING;
93 if (fscache_object_is_active(object)) { 98 if (fscache_object_is_active(object)) {
94 op->object = object; 99 op->object = object;
95 object->n_ops++; 100 object->n_ops++;
96 object->n_exclusive++; /* reads and writes must wait */ 101 object->n_exclusive++; /* reads and writes must wait */
97 102
98 if (object->n_ops > 1) { 103 if (object->n_in_progress > 0) {
99 atomic_inc(&op->usage); 104 atomic_inc(&op->usage);
100 list_add_tail(&op->pend_link, &object->pending_ops); 105 list_add_tail(&op->pend_link, &object->pending_ops);
101 fscache_stat(&fscache_n_op_pend); 106 fscache_stat(&fscache_n_op_pend);
@@ -111,7 +116,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
111 116
112 /* need to issue a new write op after this */ 117 /* need to issue a new write op after this */
113 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); 118 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
114 ret = 0;
115 } else if (object->state == FSCACHE_OBJECT_CREATING) { 119 } else if (object->state == FSCACHE_OBJECT_CREATING) {
116 op->object = object; 120 op->object = object;
117 object->n_ops++; 121 object->n_ops++;
@@ -119,14 +123,13 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
119 atomic_inc(&op->usage); 123 atomic_inc(&op->usage);
120 list_add_tail(&op->pend_link, &object->pending_ops); 124 list_add_tail(&op->pend_link, &object->pending_ops);
121 fscache_stat(&fscache_n_op_pend); 125 fscache_stat(&fscache_n_op_pend);
122 ret = 0;
123 } else { 126 } else {
124 /* not allowed to submit ops in any other state */ 127 /* not allowed to submit ops in any other state */
125 BUG(); 128 BUG();
126 } 129 }
127 130
128 spin_unlock(&object->lock); 131 spin_unlock(&object->lock);
129 return ret; 132 return 0;
130} 133}
131 134
132/* 135/*
@@ -186,6 +189,7 @@ int fscache_submit_op(struct fscache_object *object,
186 _enter("{OBJ%x OP%x},{%u}", 189 _enter("{OBJ%x OP%x},{%u}",
187 object->debug_id, op->debug_id, atomic_read(&op->usage)); 190 object->debug_id, op->debug_id, atomic_read(&op->usage));
188 191
192 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
189 ASSERTCMP(atomic_read(&op->usage), >, 0); 193 ASSERTCMP(atomic_read(&op->usage), >, 0);
190 194
191 spin_lock(&object->lock); 195 spin_lock(&object->lock);
@@ -196,6 +200,7 @@ int fscache_submit_op(struct fscache_object *object,
196 ostate = object->state; 200 ostate = object->state;
197 smp_rmb(); 201 smp_rmb();
198 202
203 op->state = FSCACHE_OP_ST_PENDING;
199 if (fscache_object_is_active(object)) { 204 if (fscache_object_is_active(object)) {
200 op->object = object; 205 op->object = object;
201 object->n_ops++; 206 object->n_ops++;
@@ -225,12 +230,15 @@ int fscache_submit_op(struct fscache_object *object,
225 object->state == FSCACHE_OBJECT_LC_DYING || 230 object->state == FSCACHE_OBJECT_LC_DYING ||
226 object->state == FSCACHE_OBJECT_WITHDRAWING) { 231 object->state == FSCACHE_OBJECT_WITHDRAWING) {
227 fscache_stat(&fscache_n_op_rejected); 232 fscache_stat(&fscache_n_op_rejected);
233 op->state = FSCACHE_OP_ST_CANCELLED;
228 ret = -ENOBUFS; 234 ret = -ENOBUFS;
229 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { 235 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
230 fscache_report_unexpected_submission(object, op, ostate); 236 fscache_report_unexpected_submission(object, op, ostate);
231 ASSERT(!fscache_object_is_active(object)); 237 ASSERT(!fscache_object_is_active(object));
238 op->state = FSCACHE_OP_ST_CANCELLED;
232 ret = -ENOBUFS; 239 ret = -ENOBUFS;
233 } else { 240 } else {
241 op->state = FSCACHE_OP_ST_CANCELLED;
234 ret = -ENOBUFS; 242 ret = -ENOBUFS;
235 } 243 }
236 244
@@ -290,13 +298,18 @@ int fscache_cancel_op(struct fscache_operation *op)
290 298
291 _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); 299 _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
292 300
301 ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
302 ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
303 ASSERTCMP(atomic_read(&op->usage), >, 0);
304
293 spin_lock(&object->lock); 305 spin_lock(&object->lock);
294 306
295 ret = -EBUSY; 307 ret = -EBUSY;
296 if (!list_empty(&op->pend_link)) { 308 if (op->state == FSCACHE_OP_ST_PENDING) {
309 ASSERT(!list_empty(&op->pend_link));
297 fscache_stat(&fscache_n_op_cancelled); 310 fscache_stat(&fscache_n_op_cancelled);
298 list_del_init(&op->pend_link); 311 list_del_init(&op->pend_link);
299 object->n_ops--; 312 op->state = FSCACHE_OP_ST_CANCELLED;
300 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) 313 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
301 object->n_exclusive--; 314 object->n_exclusive--;
302 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) 315 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
@@ -311,6 +324,37 @@ int fscache_cancel_op(struct fscache_operation *op)
311} 324}
312 325
313/* 326/*
327 * Record the completion of an in-progress operation.
328 */
329void fscache_op_complete(struct fscache_operation *op)
330{
331 struct fscache_object *object = op->object;
332
333 _enter("OBJ%x", object->debug_id);
334
335 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
336 ASSERTCMP(object->n_in_progress, >, 0);
337 ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
338 object->n_exclusive, >, 0);
339 ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
340 object->n_in_progress, ==, 1);
341
342 spin_lock(&object->lock);
343
344 op->state = FSCACHE_OP_ST_COMPLETE;
345
346 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
347 object->n_exclusive--;
348 object->n_in_progress--;
349 if (object->n_in_progress == 0)
350 fscache_start_operations(object);
351
352 spin_unlock(&object->lock);
353 _leave("");
354}
355EXPORT_SYMBOL(fscache_op_complete);
356
357/*
314 * release an operation 358 * release an operation
315 * - queues pending ops if this is the last in-progress op 359 * - queues pending ops if this is the last in-progress op
316 */ 360 */
@@ -328,8 +372,9 @@ void fscache_put_operation(struct fscache_operation *op)
328 return; 372 return;
329 373
330 _debug("PUT OP"); 374 _debug("PUT OP");
331 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) 375 ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
332 BUG(); 376 op->state, ==, FSCACHE_OP_ST_CANCELLED);
377 op->state = FSCACHE_OP_ST_DEAD;
333 378
334 fscache_stat(&fscache_n_op_release); 379 fscache_stat(&fscache_n_op_release);
335 380
@@ -365,16 +410,6 @@ void fscache_put_operation(struct fscache_operation *op)
365 return; 410 return;
366 } 411 }
367 412
368 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
369 ASSERTCMP(object->n_exclusive, >, 0);
370 object->n_exclusive--;
371 }
372
373 ASSERTCMP(object->n_in_progress, >, 0);
374 object->n_in_progress--;
375 if (object->n_in_progress == 0)
376 fscache_start_operations(object);
377
378 ASSERTCMP(object->n_ops, >, 0); 413 ASSERTCMP(object->n_ops, >, 0);
379 object->n_ops--; 414 object->n_ops--;
380 if (object->n_ops == 0) 415 if (object->n_ops == 0)
@@ -413,23 +448,14 @@ void fscache_operation_gc(struct work_struct *work)
413 spin_unlock(&cache->op_gc_list_lock); 448 spin_unlock(&cache->op_gc_list_lock);
414 449
415 object = op->object; 450 object = op->object;
451 spin_lock(&object->lock);
416 452
417 _debug("GC DEFERRED REL OBJ%x OP%x", 453 _debug("GC DEFERRED REL OBJ%x OP%x",
418 object->debug_id, op->debug_id); 454 object->debug_id, op->debug_id);
419 fscache_stat(&fscache_n_op_gc); 455 fscache_stat(&fscache_n_op_gc);
420 456
421 ASSERTCMP(atomic_read(&op->usage), ==, 0); 457 ASSERTCMP(atomic_read(&op->usage), ==, 0);
422 458 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
423 spin_lock(&object->lock);
424 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
425 ASSERTCMP(object->n_exclusive, >, 0);
426 object->n_exclusive--;
427 }
428
429 ASSERTCMP(object->n_in_progress, >, 0);
430 object->n_in_progress--;
431 if (object->n_in_progress == 0)
432 fscache_start_operations(object);
433 459
434 ASSERTCMP(object->n_ops, >, 0); 460 ASSERTCMP(object->n_ops, >, 0);
435 object->n_ops--; 461 object->n_ops--;
@@ -437,6 +463,7 @@ void fscache_operation_gc(struct work_struct *work)
437 fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); 463 fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
438 464
439 spin_unlock(&object->lock); 465 spin_unlock(&object->lock);
466 kfree(op);
440 467
441 } while (count++ < 20); 468 } while (count++ < 20);
442 469
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 248a12e22532..b38b13d2a555 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -162,6 +162,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
162 fscache_abort_object(object); 162 fscache_abort_object(object);
163 } 163 }
164 164
165 fscache_op_complete(op);
165 _leave(""); 166 _leave("");
166} 167}
167 168
@@ -223,6 +224,8 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
223 224
224 _enter("{OP%x}", op->op.debug_id); 225 _enter("{OP%x}", op->op.debug_id);
225 226
227 ASSERTCMP(op->n_pages, ==, 0);
228
226 fscache_hist(fscache_retrieval_histogram, op->start_time); 229 fscache_hist(fscache_retrieval_histogram, op->start_time);
227 if (op->context) 230 if (op->context)
228 fscache_put_context(op->op.object->cookie, op->context); 231 fscache_put_context(op->op.object->cookie, op->context);
@@ -320,6 +323,11 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
320 _debug("<<< GO"); 323 _debug("<<< GO");
321 324
322check_if_dead: 325check_if_dead:
326 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
327 fscache_stat(stat_object_dead);
328 _leave(" = -ENOBUFS [cancelled]");
329 return -ENOBUFS;
330 }
323 if (unlikely(fscache_object_is_dead(object))) { 331 if (unlikely(fscache_object_is_dead(object))) {
324 fscache_stat(stat_object_dead); 332 fscache_stat(stat_object_dead);
325 return -ENOBUFS; 333 return -ENOBUFS;
@@ -364,6 +372,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
364 _leave(" = -ENOMEM"); 372 _leave(" = -ENOMEM");
365 return -ENOMEM; 373 return -ENOMEM;
366 } 374 }
375 op->n_pages = 1;
367 376
368 spin_lock(&cookie->lock); 377 spin_lock(&cookie->lock);
369 378
@@ -375,10 +384,10 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
375 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); 384 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
376 385
377 atomic_inc(&object->n_reads); 386 atomic_inc(&object->n_reads);
378 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); 387 __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
379 388
380 if (fscache_submit_op(object, &op->op) < 0) 389 if (fscache_submit_op(object, &op->op) < 0)
381 goto nobufs_unlock; 390 goto nobufs_unlock_dec;
382 spin_unlock(&cookie->lock); 391 spin_unlock(&cookie->lock);
383 392
384 fscache_stat(&fscache_n_retrieval_ops); 393 fscache_stat(&fscache_n_retrieval_ops);
@@ -425,6 +434,8 @@ error:
425 _leave(" = %d", ret); 434 _leave(" = %d", ret);
426 return ret; 435 return ret;
427 436
437nobufs_unlock_dec:
438 atomic_dec(&object->n_reads);
428nobufs_unlock: 439nobufs_unlock:
429 spin_unlock(&cookie->lock); 440 spin_unlock(&cookie->lock);
430 kfree(op); 441 kfree(op);
@@ -482,6 +493,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
482 op = fscache_alloc_retrieval(mapping, end_io_func, context); 493 op = fscache_alloc_retrieval(mapping, end_io_func, context);
483 if (!op) 494 if (!op)
484 return -ENOMEM; 495 return -ENOMEM;
496 op->n_pages = *nr_pages;
485 497
486 spin_lock(&cookie->lock); 498 spin_lock(&cookie->lock);
487 499
@@ -491,10 +503,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
491 struct fscache_object, cookie_link); 503 struct fscache_object, cookie_link);
492 504
493 atomic_inc(&object->n_reads); 505 atomic_inc(&object->n_reads);
494 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); 506 __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
495 507
496 if (fscache_submit_op(object, &op->op) < 0) 508 if (fscache_submit_op(object, &op->op) < 0)
497 goto nobufs_unlock; 509 goto nobufs_unlock_dec;
498 spin_unlock(&cookie->lock); 510 spin_unlock(&cookie->lock);
499 511
500 fscache_stat(&fscache_n_retrieval_ops); 512 fscache_stat(&fscache_n_retrieval_ops);
@@ -541,6 +553,8 @@ error:
541 _leave(" = %d", ret); 553 _leave(" = %d", ret);
542 return ret; 554 return ret;
543 555
556nobufs_unlock_dec:
557 atomic_dec(&object->n_reads);
544nobufs_unlock: 558nobufs_unlock:
545 spin_unlock(&cookie->lock); 559 spin_unlock(&cookie->lock);
546 kfree(op); 560 kfree(op);
@@ -583,6 +597,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
583 op = fscache_alloc_retrieval(page->mapping, NULL, NULL); 597 op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
584 if (!op) 598 if (!op)
585 return -ENOMEM; 599 return -ENOMEM;
600 op->n_pages = 1;
586 601
587 spin_lock(&cookie->lock); 602 spin_lock(&cookie->lock);
588 603
@@ -696,6 +711,7 @@ static void fscache_write_op(struct fscache_operation *_op)
696 fscache_end_page_write(object, page); 711 fscache_end_page_write(object, page);
697 if (ret < 0) { 712 if (ret < 0) {
698 fscache_abort_object(object); 713 fscache_abort_object(object);
714 fscache_op_complete(&op->op);
699 } else { 715 } else {
700 fscache_enqueue_operation(&op->op); 716 fscache_enqueue_operation(&op->op);
701 } 717 }
@@ -710,6 +726,7 @@ superseded:
710 spin_unlock(&cookie->stores_lock); 726 spin_unlock(&cookie->stores_lock);
711 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); 727 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
712 spin_unlock(&object->lock); 728 spin_unlock(&object->lock);
729 fscache_op_complete(&op->op);
713 _leave(""); 730 _leave("");
714} 731}
715 732