diff options
author | Al Viro <viro@zeniv.linux.org.uk> | 2015-06-23 18:01:30 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2015-06-23 18:01:30 -0400 |
commit | 8ea3a7c0df05b2cb33e2d63aa1c964308724b1c4 (patch) | |
tree | 8fbd1ba22b5ef12c7434316b975a077aabb4cfa6 | |
parent | a6de82cab123beaf9406024943caa0242f0618b0 (diff) | |
parent | 4a47132ff472a0c2c5441baeb50cf97f2580bc43 (diff) |
Merge branch 'fscache-fixes' into for-next
-rw-r--r-- | Documentation/filesystems/caching/backend-api.txt | 23 | ||||
-rw-r--r-- | Documentation/filesystems/caching/fscache.txt | 7 | ||||
-rw-r--r-- | fs/cachefiles/internal.h | 1 | ||||
-rw-r--r-- | fs/cachefiles/namei.c | 33 | ||||
-rw-r--r-- | fs/fscache/cookie.c | 8 | ||||
-rw-r--r-- | fs/fscache/internal.h | 12 | ||||
-rw-r--r-- | fs/fscache/object.c | 69 | ||||
-rw-r--r-- | fs/fscache/operation.c | 254 | ||||
-rw-r--r-- | fs/fscache/page.c | 86 | ||||
-rw-r--r-- | fs/fscache/stats.c | 14 | ||||
-rw-r--r-- | include/linux/fscache-cache.h | 55 |
11 files changed, 378 insertions, 184 deletions
diff --git a/Documentation/filesystems/caching/backend-api.txt b/Documentation/filesystems/caching/backend-api.txt index 277d1e810670..c0bd5677271b 100644 --- a/Documentation/filesystems/caching/backend-api.txt +++ b/Documentation/filesystems/caching/backend-api.txt | |||
@@ -676,6 +676,29 @@ FS-Cache provides some utilities that a cache backend may make use of: | |||
676 | as possible. | 676 | as possible. |
677 | 677 | ||
678 | 678 | ||
679 | (*) Indicate that a stale object was found and discarded: | ||
680 | |||
681 | void fscache_object_retrying_stale(struct fscache_object *object); | ||
682 | |||
683 | This is called to indicate that the lookup procedure found an object in | ||
684 | the cache that the netfs decided was stale. The object has been | ||
685 | discarded from the cache and the lookup will be performed again. | ||
686 | |||
687 | |||
688 | (*) Indicate that the caching backend killed an object: | ||
689 | |||
690 | void fscache_object_mark_killed(struct fscache_object *object, | ||
691 | enum fscache_why_object_killed why); | ||
692 | |||
693 | This is called to indicate that the cache backend preemptively killed an | ||
694 | object. The why parameter should be set to indicate the reason: | ||
695 | |||
696 | FSCACHE_OBJECT_IS_STALE - the object was stale and needs discarding. | ||
697 | FSCACHE_OBJECT_NO_SPACE - there was insufficient cache space | ||
698 | FSCACHE_OBJECT_WAS_RETIRED - the object was retired when relinquished. | ||
699 | FSCACHE_OBJECT_WAS_CULLED - the object was culled to make space. | ||
700 | |||
701 | |||
679 | (*) Get and release references on a retrieval record: | 702 | (*) Get and release references on a retrieval record: |
680 | 703 | ||
681 | void fscache_get_retrieval(struct fscache_retrieval *op); | 704 | void fscache_get_retrieval(struct fscache_retrieval *op); |
diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt index 770267af5b3e..50f0a5757f48 100644 --- a/Documentation/filesystems/caching/fscache.txt +++ b/Documentation/filesystems/caching/fscache.txt | |||
@@ -284,8 +284,9 @@ proc files. | |||
284 | enq=N Number of times async ops queued for processing | 284 | enq=N Number of times async ops queued for processing |
285 | can=N Number of async ops cancelled | 285 | can=N Number of async ops cancelled |
286 | rej=N Number of async ops rejected due to object lookup/create failure | 286 | rej=N Number of async ops rejected due to object lookup/create failure |
287 | ini=N Number of async ops initialised | ||
287 | dfr=N Number of async ops queued for deferred release | 288 | dfr=N Number of async ops queued for deferred release |
288 | rel=N Number of async ops released | 289 | rel=N Number of async ops released (should equal ini=N when idle) |
289 | gc=N Number of deferred-release async ops garbage collected | 290 | gc=N Number of deferred-release async ops garbage collected |
290 | CacheOp alo=N Number of in-progress alloc_object() cache ops | 291 | CacheOp alo=N Number of in-progress alloc_object() cache ops |
291 | luo=N Number of in-progress lookup_object() cache ops | 292 | luo=N Number of in-progress lookup_object() cache ops |
@@ -303,6 +304,10 @@ proc files. | |||
303 | wrp=N Number of in-progress write_page() cache ops | 304 | wrp=N Number of in-progress write_page() cache ops |
304 | ucp=N Number of in-progress uncache_page() cache ops | 305 | ucp=N Number of in-progress uncache_page() cache ops |
305 | dsp=N Number of in-progress dissociate_pages() cache ops | 306 | dsp=N Number of in-progress dissociate_pages() cache ops |
307 | CacheEv nsp=N Number of object lookups/creations rejected due to lack of space | ||
308 | stl=N Number of stale objects deleted | ||
309 | rtr=N Number of objects retired when relinquished | ||
310 | cul=N Number of objects culled | ||
306 | 311 | ||
307 | 312 | ||
308 | (*) /proc/fs/fscache/histogram | 313 | (*) /proc/fs/fscache/histogram |
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 8c52472d2efa..aecd0859eacb 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h | |||
@@ -43,7 +43,6 @@ struct cachefiles_object { | |||
43 | loff_t i_size; /* object size */ | 43 | loff_t i_size; /* object size */ |
44 | unsigned long flags; | 44 | unsigned long flags; |
45 | #define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */ | 45 | #define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */ |
46 | #define CACHEFILES_OBJECT_BURIED 1 /* T if preemptively buried */ | ||
47 | atomic_t usage; /* object usage count */ | 46 | atomic_t usage; /* object usage count */ |
48 | uint8_t type; /* object type */ | 47 | uint8_t type; /* object type */ |
49 | uint8_t new; /* T if object new */ | 48 | uint8_t new; /* T if object new */ |
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index ab857ab9f40d..fc1056f5c96a 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c | |||
@@ -97,7 +97,8 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object, | |||
97 | * call vfs_unlink(), vfs_rmdir() or vfs_rename() | 97 | * call vfs_unlink(), vfs_rmdir() or vfs_rename() |
98 | */ | 98 | */ |
99 | static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, | 99 | static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, |
100 | struct dentry *dentry) | 100 | struct dentry *dentry, |
101 | enum fscache_why_object_killed why) | ||
101 | { | 102 | { |
102 | struct cachefiles_object *object; | 103 | struct cachefiles_object *object; |
103 | struct rb_node *p; | 104 | struct rb_node *p; |
@@ -132,8 +133,9 @@ found_dentry: | |||
132 | pr_err("\n"); | 133 | pr_err("\n"); |
133 | pr_err("Error: Can't preemptively bury live object\n"); | 134 | pr_err("Error: Can't preemptively bury live object\n"); |
134 | cachefiles_printk_object(object, NULL); | 135 | cachefiles_printk_object(object, NULL); |
135 | } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { | 136 | } else { |
136 | pr_err("Error: Object already preemptively buried\n"); | 137 | if (why != FSCACHE_OBJECT_IS_STALE) |
138 | fscache_object_mark_killed(&object->fscache, why); | ||
137 | } | 139 | } |
138 | 140 | ||
139 | write_unlock(&cache->active_lock); | 141 | write_unlock(&cache->active_lock); |
@@ -265,7 +267,8 @@ requeue: | |||
265 | static int cachefiles_bury_object(struct cachefiles_cache *cache, | 267 | static int cachefiles_bury_object(struct cachefiles_cache *cache, |
266 | struct dentry *dir, | 268 | struct dentry *dir, |
267 | struct dentry *rep, | 269 | struct dentry *rep, |
268 | bool preemptive) | 270 | bool preemptive, |
271 | enum fscache_why_object_killed why) | ||
269 | { | 272 | { |
270 | struct dentry *grave, *trap; | 273 | struct dentry *grave, *trap; |
271 | struct path path, path_to_graveyard; | 274 | struct path path, path_to_graveyard; |
@@ -289,7 +292,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, | |||
289 | ret = vfs_unlink(d_inode(dir), rep, NULL); | 292 | ret = vfs_unlink(d_inode(dir), rep, NULL); |
290 | 293 | ||
291 | if (preemptive) | 294 | if (preemptive) |
292 | cachefiles_mark_object_buried(cache, rep); | 295 | cachefiles_mark_object_buried(cache, rep, why); |
293 | } | 296 | } |
294 | 297 | ||
295 | mutex_unlock(&d_inode(dir)->i_mutex); | 298 | mutex_unlock(&d_inode(dir)->i_mutex); |
@@ -394,7 +397,7 @@ try_again: | |||
394 | "Rename failed with error %d", ret); | 397 | "Rename failed with error %d", ret); |
395 | 398 | ||
396 | if (preemptive) | 399 | if (preemptive) |
397 | cachefiles_mark_object_buried(cache, rep); | 400 | cachefiles_mark_object_buried(cache, rep, why); |
398 | } | 401 | } |
399 | 402 | ||
400 | unlock_rename(cache->graveyard, dir); | 403 | unlock_rename(cache->graveyard, dir); |
@@ -422,7 +425,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, | |||
422 | 425 | ||
423 | mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); | 426 | mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); |
424 | 427 | ||
425 | if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { | 428 | if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) { |
426 | /* object allocation for the same key preemptively deleted this | 429 | /* object allocation for the same key preemptively deleted this |
427 | * object's file so that it could create its own file */ | 430 | * object's file so that it could create its own file */ |
428 | _debug("object preemptively buried"); | 431 | _debug("object preemptively buried"); |
@@ -433,7 +436,8 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, | |||
433 | * may have been renamed */ | 436 | * may have been renamed */ |
434 | if (dir == object->dentry->d_parent) { | 437 | if (dir == object->dentry->d_parent) { |
435 | ret = cachefiles_bury_object(cache, dir, | 438 | ret = cachefiles_bury_object(cache, dir, |
436 | object->dentry, false); | 439 | object->dentry, false, |
440 | FSCACHE_OBJECT_WAS_RETIRED); | ||
437 | } else { | 441 | } else { |
438 | /* it got moved, presumably by cachefilesd culling it, | 442 | /* it got moved, presumably by cachefilesd culling it, |
439 | * so it's no longer in the key path and we can ignore | 443 | * so it's no longer in the key path and we can ignore |
@@ -522,7 +526,7 @@ lookup_again: | |||
522 | if (d_is_negative(next)) { | 526 | if (d_is_negative(next)) { |
523 | ret = cachefiles_has_space(cache, 1, 0); | 527 | ret = cachefiles_has_space(cache, 1, 0); |
524 | if (ret < 0) | 528 | if (ret < 0) |
525 | goto create_error; | 529 | goto no_space_error; |
526 | 530 | ||
527 | path.dentry = dir; | 531 | path.dentry = dir; |
528 | ret = security_path_mkdir(&path, next, 0); | 532 | ret = security_path_mkdir(&path, next, 0); |
@@ -551,7 +555,7 @@ lookup_again: | |||
551 | if (d_is_negative(next)) { | 555 | if (d_is_negative(next)) { |
552 | ret = cachefiles_has_space(cache, 1, 0); | 556 | ret = cachefiles_has_space(cache, 1, 0); |
553 | if (ret < 0) | 557 | if (ret < 0) |
554 | goto create_error; | 558 | goto no_space_error; |
555 | 559 | ||
556 | path.dentry = dir; | 560 | path.dentry = dir; |
557 | ret = security_path_mknod(&path, next, S_IFREG, 0); | 561 | ret = security_path_mknod(&path, next, S_IFREG, 0); |
@@ -602,7 +606,8 @@ lookup_again: | |||
602 | * mutex) */ | 606 | * mutex) */ |
603 | object->dentry = NULL; | 607 | object->dentry = NULL; |
604 | 608 | ||
605 | ret = cachefiles_bury_object(cache, dir, next, true); | 609 | ret = cachefiles_bury_object(cache, dir, next, true, |
610 | FSCACHE_OBJECT_IS_STALE); | ||
606 | dput(next); | 611 | dput(next); |
607 | next = NULL; | 612 | next = NULL; |
608 | 613 | ||
@@ -610,6 +615,7 @@ lookup_again: | |||
610 | goto delete_error; | 615 | goto delete_error; |
611 | 616 | ||
612 | _debug("redo lookup"); | 617 | _debug("redo lookup"); |
618 | fscache_object_retrying_stale(&object->fscache); | ||
613 | goto lookup_again; | 619 | goto lookup_again; |
614 | } | 620 | } |
615 | } | 621 | } |
@@ -662,6 +668,8 @@ lookup_again: | |||
662 | _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino); | 668 | _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino); |
663 | return 0; | 669 | return 0; |
664 | 670 | ||
671 | no_space_error: | ||
672 | fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE); | ||
665 | create_error: | 673 | create_error: |
666 | _debug("create error %d", ret); | 674 | _debug("create error %d", ret); |
667 | if (ret == -EIO) | 675 | if (ret == -EIO) |
@@ -927,7 +935,8 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, | |||
927 | /* actually remove the victim (drops the dir mutex) */ | 935 | /* actually remove the victim (drops the dir mutex) */ |
928 | _debug("bury"); | 936 | _debug("bury"); |
929 | 937 | ||
930 | ret = cachefiles_bury_object(cache, dir, victim, false); | 938 | ret = cachefiles_bury_object(cache, dir, victim, false, |
939 | FSCACHE_OBJECT_WAS_CULLED); | ||
931 | if (ret < 0) | 940 | if (ret < 0) |
932 | goto error; | 941 | goto error; |
933 | 942 | ||
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 89acec742e0b..d403c69bee08 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c | |||
@@ -327,7 +327,8 @@ static int fscache_alloc_object(struct fscache_cache *cache, | |||
327 | 327 | ||
328 | object_already_extant: | 328 | object_already_extant: |
329 | ret = -ENOBUFS; | 329 | ret = -ENOBUFS; |
330 | if (fscache_object_is_dead(object)) { | 330 | if (fscache_object_is_dying(object) || |
331 | fscache_cache_is_broken(object)) { | ||
331 | spin_unlock(&cookie->lock); | 332 | spin_unlock(&cookie->lock); |
332 | goto error; | 333 | goto error; |
333 | } | 334 | } |
@@ -671,7 +672,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie) | |||
671 | if (!op) | 672 | if (!op) |
672 | return -ENOMEM; | 673 | return -ENOMEM; |
673 | 674 | ||
674 | fscache_operation_init(op, NULL, NULL); | 675 | fscache_operation_init(op, NULL, NULL, NULL); |
675 | op->flags = FSCACHE_OP_MYTHREAD | | 676 | op->flags = FSCACHE_OP_MYTHREAD | |
676 | (1 << FSCACHE_OP_WAITING) | | 677 | (1 << FSCACHE_OP_WAITING) | |
677 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 678 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
@@ -695,8 +696,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie) | |||
695 | /* the work queue now carries its own ref on the object */ | 696 | /* the work queue now carries its own ref on the object */ |
696 | spin_unlock(&cookie->lock); | 697 | spin_unlock(&cookie->lock); |
697 | 698 | ||
698 | ret = fscache_wait_for_operation_activation(object, op, | 699 | ret = fscache_wait_for_operation_activation(object, op, NULL, NULL); |
699 | NULL, NULL, NULL); | ||
700 | if (ret == 0) { | 700 | if (ret == 0) { |
701 | /* ask the cache to honour the operation */ | 701 | /* ask the cache to honour the operation */ |
702 | ret = object->cache->ops->check_consistency(op); | 702 | ret = object->cache->ops->check_consistency(op); |
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 7872a62ef30c..97ec45110957 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h | |||
@@ -124,8 +124,7 @@ extern int fscache_submit_exclusive_op(struct fscache_object *, | |||
124 | struct fscache_operation *); | 124 | struct fscache_operation *); |
125 | extern int fscache_submit_op(struct fscache_object *, | 125 | extern int fscache_submit_op(struct fscache_object *, |
126 | struct fscache_operation *); | 126 | struct fscache_operation *); |
127 | extern int fscache_cancel_op(struct fscache_operation *, | 127 | extern int fscache_cancel_op(struct fscache_operation *, bool); |
128 | void (*)(struct fscache_operation *)); | ||
129 | extern void fscache_cancel_all_ops(struct fscache_object *); | 128 | extern void fscache_cancel_all_ops(struct fscache_object *); |
130 | extern void fscache_abort_object(struct fscache_object *); | 129 | extern void fscache_abort_object(struct fscache_object *); |
131 | extern void fscache_start_operations(struct fscache_object *); | 130 | extern void fscache_start_operations(struct fscache_object *); |
@@ -138,8 +137,7 @@ extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *); | |||
138 | extern int fscache_wait_for_operation_activation(struct fscache_object *, | 137 | extern int fscache_wait_for_operation_activation(struct fscache_object *, |
139 | struct fscache_operation *, | 138 | struct fscache_operation *, |
140 | atomic_t *, | 139 | atomic_t *, |
141 | atomic_t *, | 140 | atomic_t *); |
142 | void (*)(struct fscache_operation *)); | ||
143 | extern void fscache_invalidate_writes(struct fscache_cookie *); | 141 | extern void fscache_invalidate_writes(struct fscache_cookie *); |
144 | 142 | ||
145 | /* | 143 | /* |
@@ -164,6 +162,7 @@ extern atomic_t fscache_n_op_pend; | |||
164 | extern atomic_t fscache_n_op_run; | 162 | extern atomic_t fscache_n_op_run; |
165 | extern atomic_t fscache_n_op_enqueue; | 163 | extern atomic_t fscache_n_op_enqueue; |
166 | extern atomic_t fscache_n_op_deferred_release; | 164 | extern atomic_t fscache_n_op_deferred_release; |
165 | extern atomic_t fscache_n_op_initialised; | ||
167 | extern atomic_t fscache_n_op_release; | 166 | extern atomic_t fscache_n_op_release; |
168 | extern atomic_t fscache_n_op_gc; | 167 | extern atomic_t fscache_n_op_gc; |
169 | extern atomic_t fscache_n_op_cancelled; | 168 | extern atomic_t fscache_n_op_cancelled; |
@@ -271,6 +270,11 @@ extern atomic_t fscache_n_cop_write_page; | |||
271 | extern atomic_t fscache_n_cop_uncache_page; | 270 | extern atomic_t fscache_n_cop_uncache_page; |
272 | extern atomic_t fscache_n_cop_dissociate_pages; | 271 | extern atomic_t fscache_n_cop_dissociate_pages; |
273 | 272 | ||
273 | extern atomic_t fscache_n_cache_no_space_reject; | ||
274 | extern atomic_t fscache_n_cache_stale_objects; | ||
275 | extern atomic_t fscache_n_cache_retired_objects; | ||
276 | extern atomic_t fscache_n_cache_culled_objects; | ||
277 | |||
274 | static inline void fscache_stat(atomic_t *stat) | 278 | static inline void fscache_stat(atomic_t *stat) |
275 | { | 279 | { |
276 | atomic_inc(stat); | 280 | atomic_inc(stat); |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c index da032daf0e0d..9e792e30f4db 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c | |||
@@ -328,6 +328,17 @@ void fscache_object_init(struct fscache_object *object, | |||
328 | EXPORT_SYMBOL(fscache_object_init); | 328 | EXPORT_SYMBOL(fscache_object_init); |
329 | 329 | ||
330 | /* | 330 | /* |
331 | * Mark the object as no longer being live, making sure that we synchronise | ||
332 | * against op submission. | ||
333 | */ | ||
334 | static inline void fscache_mark_object_dead(struct fscache_object *object) | ||
335 | { | ||
336 | spin_lock(&object->lock); | ||
337 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | ||
338 | spin_unlock(&object->lock); | ||
339 | } | ||
340 | |||
341 | /* | ||
331 | * Abort object initialisation before we start it. | 342 | * Abort object initialisation before we start it. |
332 | */ | 343 | */ |
333 | static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, | 344 | static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, |
@@ -610,6 +621,8 @@ static const struct fscache_state *fscache_lookup_failure(struct fscache_object | |||
610 | object->cache->ops->lookup_complete(object); | 621 | object->cache->ops->lookup_complete(object); |
611 | fscache_stat_d(&fscache_n_cop_lookup_complete); | 622 | fscache_stat_d(&fscache_n_cop_lookup_complete); |
612 | 623 | ||
624 | set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags); | ||
625 | |||
613 | cookie = object->cookie; | 626 | cookie = object->cookie; |
614 | set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); | 627 | set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); |
615 | if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) | 628 | if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) |
@@ -629,7 +642,7 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob | |||
629 | _enter("{OBJ%x,%d,%d},%d", | 642 | _enter("{OBJ%x,%d,%d},%d", |
630 | object->debug_id, object->n_ops, object->n_children, event); | 643 | object->debug_id, object->n_ops, object->n_children, event); |
631 | 644 | ||
632 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 645 | fscache_mark_object_dead(object); |
633 | object->oob_event_mask = 0; | 646 | object->oob_event_mask = 0; |
634 | 647 | ||
635 | if (list_empty(&object->dependents) && | 648 | if (list_empty(&object->dependents) && |
@@ -948,7 +961,8 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj | |||
948 | if (!op) | 961 | if (!op) |
949 | goto nomem; | 962 | goto nomem; |
950 | 963 | ||
951 | fscache_operation_init(op, object->cache->ops->invalidate_object, NULL); | 964 | fscache_operation_init(op, object->cache->ops->invalidate_object, |
965 | NULL, NULL); | ||
952 | op->flags = FSCACHE_OP_ASYNC | | 966 | op->flags = FSCACHE_OP_ASYNC | |
953 | (1 << FSCACHE_OP_EXCLUSIVE) | | 967 | (1 << FSCACHE_OP_EXCLUSIVE) | |
954 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 968 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
@@ -974,13 +988,13 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj | |||
974 | return transit_to(UPDATE_OBJECT); | 988 | return transit_to(UPDATE_OBJECT); |
975 | 989 | ||
976 | nomem: | 990 | nomem: |
977 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 991 | fscache_mark_object_dead(object); |
978 | fscache_unuse_cookie(object); | 992 | fscache_unuse_cookie(object); |
979 | _leave(" [ENOMEM]"); | 993 | _leave(" [ENOMEM]"); |
980 | return transit_to(KILL_OBJECT); | 994 | return transit_to(KILL_OBJECT); |
981 | 995 | ||
982 | submit_op_failed: | 996 | submit_op_failed: |
983 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 997 | fscache_mark_object_dead(object); |
984 | spin_unlock(&cookie->lock); | 998 | spin_unlock(&cookie->lock); |
985 | fscache_unuse_cookie(object); | 999 | fscache_unuse_cookie(object); |
986 | kfree(op); | 1000 | kfree(op); |
@@ -1016,3 +1030,50 @@ static const struct fscache_state *fscache_update_object(struct fscache_object * | |||
1016 | _leave(""); | 1030 | _leave(""); |
1017 | return transit_to(WAIT_FOR_CMD); | 1031 | return transit_to(WAIT_FOR_CMD); |
1018 | } | 1032 | } |
1033 | |||
1034 | /** | ||
1035 | * fscache_object_retrying_stale - Note retrying stale object | ||
1036 | * @object: The object that will be retried | ||
1037 | * | ||
1038 | * Note that an object lookup found an on-disk object that was adjudged to be | ||
1039 | * stale and has been deleted. The lookup will be retried. | ||
1040 | */ | ||
1041 | void fscache_object_retrying_stale(struct fscache_object *object) | ||
1042 | { | ||
1043 | fscache_stat(&fscache_n_cache_no_space_reject); | ||
1044 | } | ||
1045 | EXPORT_SYMBOL(fscache_object_retrying_stale); | ||
1046 | |||
1047 | /** | ||
1048 | * fscache_object_mark_killed - Note that an object was killed | ||
1049 | * @object: The object that was culled | ||
1050 | * @why: The reason the object was killed. | ||
1051 | * | ||
1052 | * Note that an object was killed. Returns true if the object was | ||
1053 | * already marked killed, false if it wasn't. | ||
1054 | */ | ||
1055 | void fscache_object_mark_killed(struct fscache_object *object, | ||
1056 | enum fscache_why_object_killed why) | ||
1057 | { | ||
1058 | if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) { | ||
1059 | pr_err("Error: Object already killed by cache [%s]\n", | ||
1060 | object->cache->identifier); | ||
1061 | return; | ||
1062 | } | ||
1063 | |||
1064 | switch (why) { | ||
1065 | case FSCACHE_OBJECT_NO_SPACE: | ||
1066 | fscache_stat(&fscache_n_cache_no_space_reject); | ||
1067 | break; | ||
1068 | case FSCACHE_OBJECT_IS_STALE: | ||
1069 | fscache_stat(&fscache_n_cache_stale_objects); | ||
1070 | break; | ||
1071 | case FSCACHE_OBJECT_WAS_RETIRED: | ||
1072 | fscache_stat(&fscache_n_cache_retired_objects); | ||
1073 | break; | ||
1074 | case FSCACHE_OBJECT_WAS_CULLED: | ||
1075 | fscache_stat(&fscache_n_cache_culled_objects); | ||
1076 | break; | ||
1077 | } | ||
1078 | } | ||
1079 | EXPORT_SYMBOL(fscache_object_mark_killed); | ||
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index e7b87a0e5185..de67745e1cd7 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c | |||
@@ -20,6 +20,35 @@ | |||
20 | atomic_t fscache_op_debug_id; | 20 | atomic_t fscache_op_debug_id; |
21 | EXPORT_SYMBOL(fscache_op_debug_id); | 21 | EXPORT_SYMBOL(fscache_op_debug_id); |
22 | 22 | ||
23 | static void fscache_operation_dummy_cancel(struct fscache_operation *op) | ||
24 | { | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * fscache_operation_init - Do basic initialisation of an operation | ||
29 | * @op: The operation to initialise | ||
30 | * @release: The release function to assign | ||
31 | * | ||
32 | * Do basic initialisation of an operation. The caller must still set flags, | ||
33 | * object and processor if needed. | ||
34 | */ | ||
35 | void fscache_operation_init(struct fscache_operation *op, | ||
36 | fscache_operation_processor_t processor, | ||
37 | fscache_operation_cancel_t cancel, | ||
38 | fscache_operation_release_t release) | ||
39 | { | ||
40 | INIT_WORK(&op->work, fscache_op_work_func); | ||
41 | atomic_set(&op->usage, 1); | ||
42 | op->state = FSCACHE_OP_ST_INITIALISED; | ||
43 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
44 | op->processor = processor; | ||
45 | op->cancel = cancel ?: fscache_operation_dummy_cancel; | ||
46 | op->release = release; | ||
47 | INIT_LIST_HEAD(&op->pend_link); | ||
48 | fscache_stat(&fscache_n_op_initialised); | ||
49 | } | ||
50 | EXPORT_SYMBOL(fscache_operation_init); | ||
51 | |||
23 | /** | 52 | /** |
24 | * fscache_enqueue_operation - Enqueue an operation for processing | 53 | * fscache_enqueue_operation - Enqueue an operation for processing |
25 | * @op: The operation to enqueue | 54 | * @op: The operation to enqueue |
@@ -76,6 +105,43 @@ static void fscache_run_op(struct fscache_object *object, | |||
76 | } | 105 | } |
77 | 106 | ||
78 | /* | 107 | /* |
108 | * report an unexpected submission | ||
109 | */ | ||
110 | static void fscache_report_unexpected_submission(struct fscache_object *object, | ||
111 | struct fscache_operation *op, | ||
112 | const struct fscache_state *ostate) | ||
113 | { | ||
114 | static bool once_only; | ||
115 | struct fscache_operation *p; | ||
116 | unsigned n; | ||
117 | |||
118 | if (once_only) | ||
119 | return; | ||
120 | once_only = true; | ||
121 | |||
122 | kdebug("unexpected submission OP%x [OBJ%x %s]", | ||
123 | op->debug_id, object->debug_id, object->state->name); | ||
124 | kdebug("objstate=%s [%s]", object->state->name, ostate->name); | ||
125 | kdebug("objflags=%lx", object->flags); | ||
126 | kdebug("objevent=%lx [%lx]", object->events, object->event_mask); | ||
127 | kdebug("ops=%u inp=%u exc=%u", | ||
128 | object->n_ops, object->n_in_progress, object->n_exclusive); | ||
129 | |||
130 | if (!list_empty(&object->pending_ops)) { | ||
131 | n = 0; | ||
132 | list_for_each_entry(p, &object->pending_ops, pend_link) { | ||
133 | ASSERTCMP(p->object, ==, object); | ||
134 | kdebug("%p %p", op->processor, op->release); | ||
135 | n++; | ||
136 | } | ||
137 | |||
138 | kdebug("n=%u", n); | ||
139 | } | ||
140 | |||
141 | dump_stack(); | ||
142 | } | ||
143 | |||
144 | /* | ||
79 | * submit an exclusive operation for an object | 145 | * submit an exclusive operation for an object |
80 | * - other ops are excluded from running simultaneously with this one | 146 | * - other ops are excluded from running simultaneously with this one |
81 | * - this gets any extra refs it needs on an op | 147 | * - this gets any extra refs it needs on an op |
@@ -83,6 +149,8 @@ static void fscache_run_op(struct fscache_object *object, | |||
83 | int fscache_submit_exclusive_op(struct fscache_object *object, | 149 | int fscache_submit_exclusive_op(struct fscache_object *object, |
84 | struct fscache_operation *op) | 150 | struct fscache_operation *op) |
85 | { | 151 | { |
152 | const struct fscache_state *ostate; | ||
153 | unsigned long flags; | ||
86 | int ret; | 154 | int ret; |
87 | 155 | ||
88 | _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); | 156 | _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); |
@@ -95,8 +163,21 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
95 | ASSERTCMP(object->n_ops, >=, object->n_exclusive); | 163 | ASSERTCMP(object->n_ops, >=, object->n_exclusive); |
96 | ASSERT(list_empty(&op->pend_link)); | 164 | ASSERT(list_empty(&op->pend_link)); |
97 | 165 | ||
166 | ostate = object->state; | ||
167 | smp_rmb(); | ||
168 | |||
98 | op->state = FSCACHE_OP_ST_PENDING; | 169 | op->state = FSCACHE_OP_ST_PENDING; |
99 | if (fscache_object_is_active(object)) { | 170 | flags = READ_ONCE(object->flags); |
171 | if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) { | ||
172 | fscache_stat(&fscache_n_op_rejected); | ||
173 | op->cancel(op); | ||
174 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
175 | ret = -ENOBUFS; | ||
176 | } else if (unlikely(fscache_cache_is_broken(object))) { | ||
177 | op->cancel(op); | ||
178 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
179 | ret = -EIO; | ||
180 | } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) { | ||
100 | op->object = object; | 181 | op->object = object; |
101 | object->n_ops++; | 182 | object->n_ops++; |
102 | object->n_exclusive++; /* reads and writes must wait */ | 183 | object->n_exclusive++; /* reads and writes must wait */ |
@@ -118,7 +199,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
118 | /* need to issue a new write op after this */ | 199 | /* need to issue a new write op after this */ |
119 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | 200 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); |
120 | ret = 0; | 201 | ret = 0; |
121 | } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { | 202 | } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) { |
122 | op->object = object; | 203 | op->object = object; |
123 | object->n_ops++; | 204 | object->n_ops++; |
124 | object->n_exclusive++; /* reads and writes must wait */ | 205 | object->n_exclusive++; /* reads and writes must wait */ |
@@ -126,12 +207,15 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
126 | list_add_tail(&op->pend_link, &object->pending_ops); | 207 | list_add_tail(&op->pend_link, &object->pending_ops); |
127 | fscache_stat(&fscache_n_op_pend); | 208 | fscache_stat(&fscache_n_op_pend); |
128 | ret = 0; | 209 | ret = 0; |
210 | } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) { | ||
211 | op->cancel(op); | ||
212 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
213 | ret = -ENOBUFS; | ||
129 | } else { | 214 | } else { |
130 | /* If we're in any other state, there must have been an I/O | 215 | fscache_report_unexpected_submission(object, op, ostate); |
131 | * error of some nature. | 216 | op->cancel(op); |
132 | */ | 217 | op->state = FSCACHE_OP_ST_CANCELLED; |
133 | ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags)); | 218 | ret = -ENOBUFS; |
134 | ret = -EIO; | ||
135 | } | 219 | } |
136 | 220 | ||
137 | spin_unlock(&object->lock); | 221 | spin_unlock(&object->lock); |
@@ -139,43 +223,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
139 | } | 223 | } |
140 | 224 | ||
141 | /* | 225 | /* |
142 | * report an unexpected submission | ||
143 | */ | ||
144 | static void fscache_report_unexpected_submission(struct fscache_object *object, | ||
145 | struct fscache_operation *op, | ||
146 | const struct fscache_state *ostate) | ||
147 | { | ||
148 | static bool once_only; | ||
149 | struct fscache_operation *p; | ||
150 | unsigned n; | ||
151 | |||
152 | if (once_only) | ||
153 | return; | ||
154 | once_only = true; | ||
155 | |||
156 | kdebug("unexpected submission OP%x [OBJ%x %s]", | ||
157 | op->debug_id, object->debug_id, object->state->name); | ||
158 | kdebug("objstate=%s [%s]", object->state->name, ostate->name); | ||
159 | kdebug("objflags=%lx", object->flags); | ||
160 | kdebug("objevent=%lx [%lx]", object->events, object->event_mask); | ||
161 | kdebug("ops=%u inp=%u exc=%u", | ||
162 | object->n_ops, object->n_in_progress, object->n_exclusive); | ||
163 | |||
164 | if (!list_empty(&object->pending_ops)) { | ||
165 | n = 0; | ||
166 | list_for_each_entry(p, &object->pending_ops, pend_link) { | ||
167 | ASSERTCMP(p->object, ==, object); | ||
168 | kdebug("%p %p", op->processor, op->release); | ||
169 | n++; | ||
170 | } | ||
171 | |||
172 | kdebug("n=%u", n); | ||
173 | } | ||
174 | |||
175 | dump_stack(); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * submit an operation for an object | 226 | * submit an operation for an object |
180 | * - objects may be submitted only in the following states: | 227 | * - objects may be submitted only in the following states: |
181 | * - during object creation (write ops may be submitted) | 228 | * - during object creation (write ops may be submitted) |
@@ -187,6 +234,7 @@ int fscache_submit_op(struct fscache_object *object, | |||
187 | struct fscache_operation *op) | 234 | struct fscache_operation *op) |
188 | { | 235 | { |
189 | const struct fscache_state *ostate; | 236 | const struct fscache_state *ostate; |
237 | unsigned long flags; | ||
190 | int ret; | 238 | int ret; |
191 | 239 | ||
192 | _enter("{OBJ%x OP%x},{%u}", | 240 | _enter("{OBJ%x OP%x},{%u}", |
@@ -204,7 +252,17 @@ int fscache_submit_op(struct fscache_object *object, | |||
204 | smp_rmb(); | 252 | smp_rmb(); |
205 | 253 | ||
206 | op->state = FSCACHE_OP_ST_PENDING; | 254 | op->state = FSCACHE_OP_ST_PENDING; |
207 | if (fscache_object_is_active(object)) { | 255 | flags = READ_ONCE(object->flags); |
256 | if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) { | ||
257 | fscache_stat(&fscache_n_op_rejected); | ||
258 | op->cancel(op); | ||
259 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
260 | ret = -ENOBUFS; | ||
261 | } else if (unlikely(fscache_cache_is_broken(object))) { | ||
262 | op->cancel(op); | ||
263 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
264 | ret = -EIO; | ||
265 | } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) { | ||
208 | op->object = object; | 266 | op->object = object; |
209 | object->n_ops++; | 267 | object->n_ops++; |
210 | 268 | ||
@@ -222,23 +280,21 @@ int fscache_submit_op(struct fscache_object *object, | |||
222 | fscache_run_op(object, op); | 280 | fscache_run_op(object, op); |
223 | } | 281 | } |
224 | ret = 0; | 282 | ret = 0; |
225 | } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { | 283 | } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) { |
226 | op->object = object; | 284 | op->object = object; |
227 | object->n_ops++; | 285 | object->n_ops++; |
228 | atomic_inc(&op->usage); | 286 | atomic_inc(&op->usage); |
229 | list_add_tail(&op->pend_link, &object->pending_ops); | 287 | list_add_tail(&op->pend_link, &object->pending_ops); |
230 | fscache_stat(&fscache_n_op_pend); | 288 | fscache_stat(&fscache_n_op_pend); |
231 | ret = 0; | 289 | ret = 0; |
232 | } else if (fscache_object_is_dying(object)) { | 290 | } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) { |
233 | fscache_stat(&fscache_n_op_rejected); | 291 | op->cancel(op); |
234 | op->state = FSCACHE_OP_ST_CANCELLED; | 292 | op->state = FSCACHE_OP_ST_CANCELLED; |
235 | ret = -ENOBUFS; | 293 | ret = -ENOBUFS; |
236 | } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { | 294 | } else { |
237 | fscache_report_unexpected_submission(object, op, ostate); | 295 | fscache_report_unexpected_submission(object, op, ostate); |
238 | ASSERT(!fscache_object_is_active(object)); | 296 | ASSERT(!fscache_object_is_active(object)); |
239 | op->state = FSCACHE_OP_ST_CANCELLED; | 297 | op->cancel(op); |
240 | ret = -ENOBUFS; | ||
241 | } else { | ||
242 | op->state = FSCACHE_OP_ST_CANCELLED; | 298 | op->state = FSCACHE_OP_ST_CANCELLED; |
243 | ret = -ENOBUFS; | 299 | ret = -ENOBUFS; |
244 | } | 300 | } |
@@ -293,9 +349,10 @@ void fscache_start_operations(struct fscache_object *object) | |||
293 | * cancel an operation that's pending on an object | 349 | * cancel an operation that's pending on an object |
294 | */ | 350 | */ |
295 | int fscache_cancel_op(struct fscache_operation *op, | 351 | int fscache_cancel_op(struct fscache_operation *op, |
296 | void (*do_cancel)(struct fscache_operation *)) | 352 | bool cancel_in_progress_op) |
297 | { | 353 | { |
298 | struct fscache_object *object = op->object; | 354 | struct fscache_object *object = op->object; |
355 | bool put = false; | ||
299 | int ret; | 356 | int ret; |
300 | 357 | ||
301 | _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); | 358 | _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); |
@@ -309,19 +366,37 @@ int fscache_cancel_op(struct fscache_operation *op, | |||
309 | ret = -EBUSY; | 366 | ret = -EBUSY; |
310 | if (op->state == FSCACHE_OP_ST_PENDING) { | 367 | if (op->state == FSCACHE_OP_ST_PENDING) { |
311 | ASSERT(!list_empty(&op->pend_link)); | 368 | ASSERT(!list_empty(&op->pend_link)); |
312 | fscache_stat(&fscache_n_op_cancelled); | ||
313 | list_del_init(&op->pend_link); | 369 | list_del_init(&op->pend_link); |
314 | if (do_cancel) | 370 | put = true; |
315 | do_cancel(op); | 371 | |
372 | fscache_stat(&fscache_n_op_cancelled); | ||
373 | op->cancel(op); | ||
374 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
375 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | ||
376 | object->n_exclusive--; | ||
377 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) | ||
378 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); | ||
379 | ret = 0; | ||
380 | } else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) { | ||
381 | ASSERTCMP(object->n_in_progress, >, 0); | ||
382 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | ||
383 | object->n_exclusive--; | ||
384 | object->n_in_progress--; | ||
385 | if (object->n_in_progress == 0) | ||
386 | fscache_start_operations(object); | ||
387 | |||
388 | fscache_stat(&fscache_n_op_cancelled); | ||
389 | op->cancel(op); | ||
316 | op->state = FSCACHE_OP_ST_CANCELLED; | 390 | op->state = FSCACHE_OP_ST_CANCELLED; |
317 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | 391 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) |
318 | object->n_exclusive--; | 392 | object->n_exclusive--; |
319 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) | 393 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) |
320 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); | 394 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); |
321 | fscache_put_operation(op); | ||
322 | ret = 0; | 395 | ret = 0; |
323 | } | 396 | } |
324 | 397 | ||
398 | if (put) | ||
399 | fscache_put_operation(op); | ||
325 | spin_unlock(&object->lock); | 400 | spin_unlock(&object->lock); |
326 | _leave(" = %d", ret); | 401 | _leave(" = %d", ret); |
327 | return ret; | 402 | return ret; |
@@ -345,6 +420,7 @@ void fscache_cancel_all_ops(struct fscache_object *object) | |||
345 | list_del_init(&op->pend_link); | 420 | list_del_init(&op->pend_link); |
346 | 421 | ||
347 | ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); | 422 | ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); |
423 | op->cancel(op); | ||
348 | op->state = FSCACHE_OP_ST_CANCELLED; | 424 | op->state = FSCACHE_OP_ST_CANCELLED; |
349 | 425 | ||
350 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | 426 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) |
@@ -377,8 +453,12 @@ void fscache_op_complete(struct fscache_operation *op, bool cancelled) | |||
377 | 453 | ||
378 | spin_lock(&object->lock); | 454 | spin_lock(&object->lock); |
379 | 455 | ||
380 | op->state = cancelled ? | 456 | if (!cancelled) { |
381 | FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE; | 457 | op->state = FSCACHE_OP_ST_COMPLETE; |
458 | } else { | ||
459 | op->cancel(op); | ||
460 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
461 | } | ||
382 | 462 | ||
383 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | 463 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) |
384 | object->n_exclusive--; | 464 | object->n_exclusive--; |
@@ -409,9 +489,9 @@ void fscache_put_operation(struct fscache_operation *op) | |||
409 | return; | 489 | return; |
410 | 490 | ||
411 | _debug("PUT OP"); | 491 | _debug("PUT OP"); |
412 | ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE, | 492 | ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED && |
493 | op->state != FSCACHE_OP_ST_COMPLETE, | ||
413 | op->state, ==, FSCACHE_OP_ST_CANCELLED); | 494 | op->state, ==, FSCACHE_OP_ST_CANCELLED); |
414 | op->state = FSCACHE_OP_ST_DEAD; | ||
415 | 495 | ||
416 | fscache_stat(&fscache_n_op_release); | 496 | fscache_stat(&fscache_n_op_release); |
417 | 497 | ||
@@ -419,37 +499,39 @@ void fscache_put_operation(struct fscache_operation *op) | |||
419 | op->release(op); | 499 | op->release(op); |
420 | op->release = NULL; | 500 | op->release = NULL; |
421 | } | 501 | } |
502 | op->state = FSCACHE_OP_ST_DEAD; | ||
422 | 503 | ||
423 | object = op->object; | 504 | object = op->object; |
505 | if (likely(object)) { | ||
506 | if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) | ||
507 | atomic_dec(&object->n_reads); | ||
508 | if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) | ||
509 | fscache_unuse_cookie(object); | ||
510 | |||
511 | /* now... we may get called with the object spinlock held, so we | ||
512 | * complete the cleanup here only if we can immediately acquire the | ||
513 | * lock, and defer it otherwise */ | ||
514 | if (!spin_trylock(&object->lock)) { | ||
515 | _debug("defer put"); | ||
516 | fscache_stat(&fscache_n_op_deferred_release); | ||
517 | |||
518 | cache = object->cache; | ||
519 | spin_lock(&cache->op_gc_list_lock); | ||
520 | list_add_tail(&op->pend_link, &cache->op_gc_list); | ||
521 | spin_unlock(&cache->op_gc_list_lock); | ||
522 | schedule_work(&cache->op_gc); | ||
523 | _leave(" [defer]"); | ||
524 | return; | ||
525 | } | ||
424 | 526 | ||
425 | if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) | 527 | ASSERTCMP(object->n_ops, >, 0); |
426 | atomic_dec(&object->n_reads); | 528 | object->n_ops--; |
427 | if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) | 529 | if (object->n_ops == 0) |
428 | fscache_unuse_cookie(object); | 530 | fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); |
429 | |||
430 | /* now... we may get called with the object spinlock held, so we | ||
431 | * complete the cleanup here only if we can immediately acquire the | ||
432 | * lock, and defer it otherwise */ | ||
433 | if (!spin_trylock(&object->lock)) { | ||
434 | _debug("defer put"); | ||
435 | fscache_stat(&fscache_n_op_deferred_release); | ||
436 | 531 | ||
437 | cache = object->cache; | 532 | spin_unlock(&object->lock); |
438 | spin_lock(&cache->op_gc_list_lock); | ||
439 | list_add_tail(&op->pend_link, &cache->op_gc_list); | ||
440 | spin_unlock(&cache->op_gc_list_lock); | ||
441 | schedule_work(&cache->op_gc); | ||
442 | _leave(" [defer]"); | ||
443 | return; | ||
444 | } | 533 | } |
445 | 534 | ||
446 | ASSERTCMP(object->n_ops, >, 0); | ||
447 | object->n_ops--; | ||
448 | if (object->n_ops == 0) | ||
449 | fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); | ||
450 | |||
451 | spin_unlock(&object->lock); | ||
452 | |||
453 | kfree(op); | 535 | kfree(op); |
454 | _leave(" [done]"); | 536 | _leave(" [done]"); |
455 | } | 537 | } |
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index de33b3fccca6..483bbc613bf0 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -213,7 +213,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
213 | return -ENOMEM; | 213 | return -ENOMEM; |
214 | } | 214 | } |
215 | 215 | ||
216 | fscache_operation_init(op, fscache_attr_changed_op, NULL); | 216 | fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL); |
217 | op->flags = FSCACHE_OP_ASYNC | | 217 | op->flags = FSCACHE_OP_ASYNC | |
218 | (1 << FSCACHE_OP_EXCLUSIVE) | | 218 | (1 << FSCACHE_OP_EXCLUSIVE) | |
219 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 219 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
@@ -239,7 +239,7 @@ nobufs_dec: | |||
239 | wake_cookie = __fscache_unuse_cookie(cookie); | 239 | wake_cookie = __fscache_unuse_cookie(cookie); |
240 | nobufs: | 240 | nobufs: |
241 | spin_unlock(&cookie->lock); | 241 | spin_unlock(&cookie->lock); |
242 | kfree(op); | 242 | fscache_put_operation(op); |
243 | if (wake_cookie) | 243 | if (wake_cookie) |
244 | __fscache_wake_unused_cookie(cookie); | 244 | __fscache_wake_unused_cookie(cookie); |
245 | fscache_stat(&fscache_n_attr_changed_nobufs); | 245 | fscache_stat(&fscache_n_attr_changed_nobufs); |
@@ -249,6 +249,17 @@ nobufs: | |||
249 | EXPORT_SYMBOL(__fscache_attr_changed); | 249 | EXPORT_SYMBOL(__fscache_attr_changed); |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * Handle cancellation of a pending retrieval op | ||
253 | */ | ||
254 | static void fscache_do_cancel_retrieval(struct fscache_operation *_op) | ||
255 | { | ||
256 | struct fscache_retrieval *op = | ||
257 | container_of(_op, struct fscache_retrieval, op); | ||
258 | |||
259 | atomic_set(&op->n_pages, 0); | ||
260 | } | ||
261 | |||
262 | /* | ||
252 | * release a retrieval op reference | 263 | * release a retrieval op reference |
253 | */ | 264 | */ |
254 | static void fscache_release_retrieval_op(struct fscache_operation *_op) | 265 | static void fscache_release_retrieval_op(struct fscache_operation *_op) |
@@ -258,11 +269,12 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) | |||
258 | 269 | ||
259 | _enter("{OP%x}", op->op.debug_id); | 270 | _enter("{OP%x}", op->op.debug_id); |
260 | 271 | ||
261 | ASSERTCMP(atomic_read(&op->n_pages), ==, 0); | 272 | ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED, |
273 | atomic_read(&op->n_pages), ==, 0); | ||
262 | 274 | ||
263 | fscache_hist(fscache_retrieval_histogram, op->start_time); | 275 | fscache_hist(fscache_retrieval_histogram, op->start_time); |
264 | if (op->context) | 276 | if (op->context) |
265 | fscache_put_context(op->op.object->cookie, op->context); | 277 | fscache_put_context(op->cookie, op->context); |
266 | 278 | ||
267 | _leave(""); | 279 | _leave(""); |
268 | } | 280 | } |
@@ -285,15 +297,24 @@ static struct fscache_retrieval *fscache_alloc_retrieval( | |||
285 | return NULL; | 297 | return NULL; |
286 | } | 298 | } |
287 | 299 | ||
288 | fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op); | 300 | fscache_operation_init(&op->op, NULL, |
301 | fscache_do_cancel_retrieval, | ||
302 | fscache_release_retrieval_op); | ||
289 | op->op.flags = FSCACHE_OP_MYTHREAD | | 303 | op->op.flags = FSCACHE_OP_MYTHREAD | |
290 | (1UL << FSCACHE_OP_WAITING) | | 304 | (1UL << FSCACHE_OP_WAITING) | |
291 | (1UL << FSCACHE_OP_UNUSE_COOKIE); | 305 | (1UL << FSCACHE_OP_UNUSE_COOKIE); |
306 | op->cookie = cookie; | ||
292 | op->mapping = mapping; | 307 | op->mapping = mapping; |
293 | op->end_io_func = end_io_func; | 308 | op->end_io_func = end_io_func; |
294 | op->context = context; | 309 | op->context = context; |
295 | op->start_time = jiffies; | 310 | op->start_time = jiffies; |
296 | INIT_LIST_HEAD(&op->to_do); | 311 | INIT_LIST_HEAD(&op->to_do); |
312 | |||
313 | /* Pin the netfs read context in case we need to do the actual netfs | ||
314 | * read because we've encountered a cache read failure. | ||
315 | */ | ||
316 | if (context) | ||
317 | fscache_get_context(op->cookie, context); | ||
297 | return op; | 318 | return op; |
298 | } | 319 | } |
299 | 320 | ||
@@ -330,24 +351,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) | |||
330 | } | 351 | } |
331 | 352 | ||
332 | /* | 353 | /* |
333 | * Handle cancellation of a pending retrieval op | ||
334 | */ | ||
335 | static void fscache_do_cancel_retrieval(struct fscache_operation *_op) | ||
336 | { | ||
337 | struct fscache_retrieval *op = | ||
338 | container_of(_op, struct fscache_retrieval, op); | ||
339 | |||
340 | atomic_set(&op->n_pages, 0); | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * wait for an object to become active (or dead) | 354 | * wait for an object to become active (or dead) |
345 | */ | 355 | */ |
346 | int fscache_wait_for_operation_activation(struct fscache_object *object, | 356 | int fscache_wait_for_operation_activation(struct fscache_object *object, |
347 | struct fscache_operation *op, | 357 | struct fscache_operation *op, |
348 | atomic_t *stat_op_waits, | 358 | atomic_t *stat_op_waits, |
349 | atomic_t *stat_object_dead, | 359 | atomic_t *stat_object_dead) |
350 | void (*do_cancel)(struct fscache_operation *)) | ||
351 | { | 360 | { |
352 | int ret; | 361 | int ret; |
353 | 362 | ||
@@ -359,7 +368,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object, | |||
359 | fscache_stat(stat_op_waits); | 368 | fscache_stat(stat_op_waits); |
360 | if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING, | 369 | if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING, |
361 | TASK_INTERRUPTIBLE) != 0) { | 370 | TASK_INTERRUPTIBLE) != 0) { |
362 | ret = fscache_cancel_op(op, do_cancel); | 371 | ret = fscache_cancel_op(op, false); |
363 | if (ret == 0) | 372 | if (ret == 0) |
364 | return -ERESTARTSYS; | 373 | return -ERESTARTSYS; |
365 | 374 | ||
@@ -377,11 +386,13 @@ check_if_dead: | |||
377 | _leave(" = -ENOBUFS [cancelled]"); | 386 | _leave(" = -ENOBUFS [cancelled]"); |
378 | return -ENOBUFS; | 387 | return -ENOBUFS; |
379 | } | 388 | } |
380 | if (unlikely(fscache_object_is_dead(object))) { | 389 | if (unlikely(fscache_object_is_dying(object) || |
381 | pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state); | 390 | fscache_cache_is_broken(object))) { |
382 | fscache_cancel_op(op, do_cancel); | 391 | enum fscache_operation_state state = op->state; |
392 | fscache_cancel_op(op, true); | ||
383 | if (stat_object_dead) | 393 | if (stat_object_dead) |
384 | fscache_stat(stat_object_dead); | 394 | fscache_stat(stat_object_dead); |
395 | _leave(" = -ENOBUFS [obj dead %d]", state); | ||
385 | return -ENOBUFS; | 396 | return -ENOBUFS; |
386 | } | 397 | } |
387 | return 0; | 398 | return 0; |
@@ -453,17 +464,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, | |||
453 | 464 | ||
454 | fscache_stat(&fscache_n_retrieval_ops); | 465 | fscache_stat(&fscache_n_retrieval_ops); |
455 | 466 | ||
456 | /* pin the netfs read context in case we need to do the actual netfs | ||
457 | * read because we've encountered a cache read failure */ | ||
458 | fscache_get_context(object->cookie, op->context); | ||
459 | |||
460 | /* we wait for the operation to become active, and then process it | 467 | /* we wait for the operation to become active, and then process it |
461 | * *here*, in this thread, and not in the thread pool */ | 468 | * *here*, in this thread, and not in the thread pool */ |
462 | ret = fscache_wait_for_operation_activation( | 469 | ret = fscache_wait_for_operation_activation( |
463 | object, &op->op, | 470 | object, &op->op, |
464 | __fscache_stat(&fscache_n_retrieval_op_waits), | 471 | __fscache_stat(&fscache_n_retrieval_op_waits), |
465 | __fscache_stat(&fscache_n_retrievals_object_dead), | 472 | __fscache_stat(&fscache_n_retrievals_object_dead)); |
466 | fscache_do_cancel_retrieval); | ||
467 | if (ret < 0) | 473 | if (ret < 0) |
468 | goto error; | 474 | goto error; |
469 | 475 | ||
@@ -503,7 +509,7 @@ nobufs_unlock: | |||
503 | spin_unlock(&cookie->lock); | 509 | spin_unlock(&cookie->lock); |
504 | if (wake_cookie) | 510 | if (wake_cookie) |
505 | __fscache_wake_unused_cookie(cookie); | 511 | __fscache_wake_unused_cookie(cookie); |
506 | kfree(op); | 512 | fscache_put_retrieval(op); |
507 | nobufs: | 513 | nobufs: |
508 | fscache_stat(&fscache_n_retrievals_nobufs); | 514 | fscache_stat(&fscache_n_retrievals_nobufs); |
509 | _leave(" = -ENOBUFS"); | 515 | _leave(" = -ENOBUFS"); |
@@ -584,17 +590,12 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, | |||
584 | 590 | ||
585 | fscache_stat(&fscache_n_retrieval_ops); | 591 | fscache_stat(&fscache_n_retrieval_ops); |
586 | 592 | ||
587 | /* pin the netfs read context in case we need to do the actual netfs | ||
588 | * read because we've encountered a cache read failure */ | ||
589 | fscache_get_context(object->cookie, op->context); | ||
590 | |||
591 | /* we wait for the operation to become active, and then process it | 593 | /* we wait for the operation to become active, and then process it |
592 | * *here*, in this thread, and not in the thread pool */ | 594 | * *here*, in this thread, and not in the thread pool */ |
593 | ret = fscache_wait_for_operation_activation( | 595 | ret = fscache_wait_for_operation_activation( |
594 | object, &op->op, | 596 | object, &op->op, |
595 | __fscache_stat(&fscache_n_retrieval_op_waits), | 597 | __fscache_stat(&fscache_n_retrieval_op_waits), |
596 | __fscache_stat(&fscache_n_retrievals_object_dead), | 598 | __fscache_stat(&fscache_n_retrievals_object_dead)); |
597 | fscache_do_cancel_retrieval); | ||
598 | if (ret < 0) | 599 | if (ret < 0) |
599 | goto error; | 600 | goto error; |
600 | 601 | ||
@@ -632,7 +633,7 @@ nobufs_unlock_dec: | |||
632 | wake_cookie = __fscache_unuse_cookie(cookie); | 633 | wake_cookie = __fscache_unuse_cookie(cookie); |
633 | nobufs_unlock: | 634 | nobufs_unlock: |
634 | spin_unlock(&cookie->lock); | 635 | spin_unlock(&cookie->lock); |
635 | kfree(op); | 636 | fscache_put_retrieval(op); |
636 | if (wake_cookie) | 637 | if (wake_cookie) |
637 | __fscache_wake_unused_cookie(cookie); | 638 | __fscache_wake_unused_cookie(cookie); |
638 | nobufs: | 639 | nobufs: |
@@ -700,8 +701,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, | |||
700 | ret = fscache_wait_for_operation_activation( | 701 | ret = fscache_wait_for_operation_activation( |
701 | object, &op->op, | 702 | object, &op->op, |
702 | __fscache_stat(&fscache_n_alloc_op_waits), | 703 | __fscache_stat(&fscache_n_alloc_op_waits), |
703 | __fscache_stat(&fscache_n_allocs_object_dead), | 704 | __fscache_stat(&fscache_n_allocs_object_dead)); |
704 | fscache_do_cancel_retrieval); | ||
705 | if (ret < 0) | 705 | if (ret < 0) |
706 | goto error; | 706 | goto error; |
707 | 707 | ||
@@ -726,7 +726,7 @@ nobufs_unlock_dec: | |||
726 | wake_cookie = __fscache_unuse_cookie(cookie); | 726 | wake_cookie = __fscache_unuse_cookie(cookie); |
727 | nobufs_unlock: | 727 | nobufs_unlock: |
728 | spin_unlock(&cookie->lock); | 728 | spin_unlock(&cookie->lock); |
729 | kfree(op); | 729 | fscache_put_retrieval(op); |
730 | if (wake_cookie) | 730 | if (wake_cookie) |
731 | __fscache_wake_unused_cookie(cookie); | 731 | __fscache_wake_unused_cookie(cookie); |
732 | nobufs: | 732 | nobufs: |
@@ -944,7 +944,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, | |||
944 | if (!op) | 944 | if (!op) |
945 | goto nomem; | 945 | goto nomem; |
946 | 946 | ||
947 | fscache_operation_init(&op->op, fscache_write_op, | 947 | fscache_operation_init(&op->op, fscache_write_op, NULL, |
948 | fscache_release_write_op); | 948 | fscache_release_write_op); |
949 | op->op.flags = FSCACHE_OP_ASYNC | | 949 | op->op.flags = FSCACHE_OP_ASYNC | |
950 | (1 << FSCACHE_OP_WAITING) | | 950 | (1 << FSCACHE_OP_WAITING) | |
@@ -1016,7 +1016,7 @@ already_pending: | |||
1016 | spin_unlock(&object->lock); | 1016 | spin_unlock(&object->lock); |
1017 | spin_unlock(&cookie->lock); | 1017 | spin_unlock(&cookie->lock); |
1018 | radix_tree_preload_end(); | 1018 | radix_tree_preload_end(); |
1019 | kfree(op); | 1019 | fscache_put_operation(&op->op); |
1020 | fscache_stat(&fscache_n_stores_ok); | 1020 | fscache_stat(&fscache_n_stores_ok); |
1021 | _leave(" = 0"); | 1021 | _leave(" = 0"); |
1022 | return 0; | 1022 | return 0; |
@@ -1036,7 +1036,7 @@ nobufs_unlock_obj: | |||
1036 | nobufs: | 1036 | nobufs: |
1037 | spin_unlock(&cookie->lock); | 1037 | spin_unlock(&cookie->lock); |
1038 | radix_tree_preload_end(); | 1038 | radix_tree_preload_end(); |
1039 | kfree(op); | 1039 | fscache_put_operation(&op->op); |
1040 | if (wake_cookie) | 1040 | if (wake_cookie) |
1041 | __fscache_wake_unused_cookie(cookie); | 1041 | __fscache_wake_unused_cookie(cookie); |
1042 | fscache_stat(&fscache_n_stores_nobufs); | 1042 | fscache_stat(&fscache_n_stores_nobufs); |
@@ -1044,7 +1044,7 @@ nobufs: | |||
1044 | return -ENOBUFS; | 1044 | return -ENOBUFS; |
1045 | 1045 | ||
1046 | nomem_free: | 1046 | nomem_free: |
1047 | kfree(op); | 1047 | fscache_put_operation(&op->op); |
1048 | nomem: | 1048 | nomem: |
1049 | fscache_stat(&fscache_n_stores_oom); | 1049 | fscache_stat(&fscache_n_stores_oom); |
1050 | _leave(" = -ENOMEM"); | 1050 | _leave(" = -ENOMEM"); |
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 40d13c70ef51..7cfa0aacdf6d 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c | |||
@@ -23,6 +23,7 @@ atomic_t fscache_n_op_run; | |||
23 | atomic_t fscache_n_op_enqueue; | 23 | atomic_t fscache_n_op_enqueue; |
24 | atomic_t fscache_n_op_requeue; | 24 | atomic_t fscache_n_op_requeue; |
25 | atomic_t fscache_n_op_deferred_release; | 25 | atomic_t fscache_n_op_deferred_release; |
26 | atomic_t fscache_n_op_initialised; | ||
26 | atomic_t fscache_n_op_release; | 27 | atomic_t fscache_n_op_release; |
27 | atomic_t fscache_n_op_gc; | 28 | atomic_t fscache_n_op_gc; |
28 | atomic_t fscache_n_op_cancelled; | 29 | atomic_t fscache_n_op_cancelled; |
@@ -130,6 +131,11 @@ atomic_t fscache_n_cop_write_page; | |||
130 | atomic_t fscache_n_cop_uncache_page; | 131 | atomic_t fscache_n_cop_uncache_page; |
131 | atomic_t fscache_n_cop_dissociate_pages; | 132 | atomic_t fscache_n_cop_dissociate_pages; |
132 | 133 | ||
134 | atomic_t fscache_n_cache_no_space_reject; | ||
135 | atomic_t fscache_n_cache_stale_objects; | ||
136 | atomic_t fscache_n_cache_retired_objects; | ||
137 | atomic_t fscache_n_cache_culled_objects; | ||
138 | |||
133 | /* | 139 | /* |
134 | * display the general statistics | 140 | * display the general statistics |
135 | */ | 141 | */ |
@@ -246,7 +252,8 @@ static int fscache_stats_show(struct seq_file *m, void *v) | |||
246 | atomic_read(&fscache_n_op_enqueue), | 252 | atomic_read(&fscache_n_op_enqueue), |
247 | atomic_read(&fscache_n_op_cancelled), | 253 | atomic_read(&fscache_n_op_cancelled), |
248 | atomic_read(&fscache_n_op_rejected)); | 254 | atomic_read(&fscache_n_op_rejected)); |
249 | seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", | 255 | seq_printf(m, "Ops : ini=%u dfr=%u rel=%u gc=%u\n", |
256 | atomic_read(&fscache_n_op_initialised), | ||
250 | atomic_read(&fscache_n_op_deferred_release), | 257 | atomic_read(&fscache_n_op_deferred_release), |
251 | atomic_read(&fscache_n_op_release), | 258 | atomic_read(&fscache_n_op_release), |
252 | atomic_read(&fscache_n_op_gc)); | 259 | atomic_read(&fscache_n_op_gc)); |
@@ -271,6 +278,11 @@ static int fscache_stats_show(struct seq_file *m, void *v) | |||
271 | atomic_read(&fscache_n_cop_write_page), | 278 | atomic_read(&fscache_n_cop_write_page), |
272 | atomic_read(&fscache_n_cop_uncache_page), | 279 | atomic_read(&fscache_n_cop_uncache_page), |
273 | atomic_read(&fscache_n_cop_dissociate_pages)); | 280 | atomic_read(&fscache_n_cop_dissociate_pages)); |
281 | seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n", | ||
282 | atomic_read(&fscache_n_cache_no_space_reject), | ||
283 | atomic_read(&fscache_n_cache_stale_objects), | ||
284 | atomic_read(&fscache_n_cache_retired_objects), | ||
285 | atomic_read(&fscache_n_cache_culled_objects)); | ||
274 | return 0; | 286 | return 0; |
275 | } | 287 | } |
276 | 288 | ||
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 771484993ca7..604e1526cd00 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq; | |||
74 | */ | 74 | */ |
75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); | 75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); |
76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); | 76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); |
77 | typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op); | ||
77 | 78 | ||
78 | enum fscache_operation_state { | 79 | enum fscache_operation_state { |
79 | FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ | 80 | FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ |
@@ -109,6 +110,9 @@ struct fscache_operation { | |||
109 | * the op in a non-pool thread */ | 110 | * the op in a non-pool thread */ |
110 | fscache_operation_processor_t processor; | 111 | fscache_operation_processor_t processor; |
111 | 112 | ||
113 | /* Operation cancellation cleanup (optional) */ | ||
114 | fscache_operation_cancel_t cancel; | ||
115 | |||
112 | /* operation releaser */ | 116 | /* operation releaser */ |
113 | fscache_operation_release_t release; | 117 | fscache_operation_release_t release; |
114 | }; | 118 | }; |
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work); | |||
119 | extern void fscache_enqueue_operation(struct fscache_operation *); | 123 | extern void fscache_enqueue_operation(struct fscache_operation *); |
120 | extern void fscache_op_complete(struct fscache_operation *, bool); | 124 | extern void fscache_op_complete(struct fscache_operation *, bool); |
121 | extern void fscache_put_operation(struct fscache_operation *); | 125 | extern void fscache_put_operation(struct fscache_operation *); |
122 | 126 | extern void fscache_operation_init(struct fscache_operation *, | |
123 | /** | 127 | fscache_operation_processor_t, |
124 | * fscache_operation_init - Do basic initialisation of an operation | 128 | fscache_operation_cancel_t, |
125 | * @op: The operation to initialise | 129 | fscache_operation_release_t); |
126 | * @release: The release function to assign | ||
127 | * | ||
128 | * Do basic initialisation of an operation. The caller must still set flags, | ||
129 | * object and processor if needed. | ||
130 | */ | ||
131 | static inline void fscache_operation_init(struct fscache_operation *op, | ||
132 | fscache_operation_processor_t processor, | ||
133 | fscache_operation_release_t release) | ||
134 | { | ||
135 | INIT_WORK(&op->work, fscache_op_work_func); | ||
136 | atomic_set(&op->usage, 1); | ||
137 | op->state = FSCACHE_OP_ST_INITIALISED; | ||
138 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
139 | op->processor = processor; | ||
140 | op->release = release; | ||
141 | INIT_LIST_HEAD(&op->pend_link); | ||
142 | } | ||
143 | 130 | ||
144 | /* | 131 | /* |
145 | * data read operation | 132 | * data read operation |
146 | */ | 133 | */ |
147 | struct fscache_retrieval { | 134 | struct fscache_retrieval { |
148 | struct fscache_operation op; | 135 | struct fscache_operation op; |
136 | struct fscache_cookie *cookie; /* The netfs cookie */ | ||
149 | struct address_space *mapping; /* netfs pages */ | 137 | struct address_space *mapping; /* netfs pages */ |
150 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ | 138 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ |
151 | void *context; /* netfs read context (pinned) */ | 139 | void *context; /* netfs read context (pinned) */ |
@@ -371,6 +359,7 @@ struct fscache_object { | |||
371 | #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ | 359 | #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ |
372 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ | 360 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ |
373 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ | 361 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ |
362 | #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ | ||
374 | 363 | ||
375 | struct list_head cache_link; /* link in cache->object_list */ | 364 | struct list_head cache_link; /* link in cache->object_list */ |
376 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | 365 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ |
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object) | |||
410 | return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); | 399 | return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); |
411 | } | 400 | } |
412 | 401 | ||
413 | static inline bool fscache_object_is_active(struct fscache_object *object) | 402 | static inline bool fscache_cache_is_broken(struct fscache_object *object) |
414 | { | 403 | { |
415 | return fscache_object_is_available(object) && | 404 | return test_bit(FSCACHE_IOERROR, &object->cache->flags); |
416 | fscache_object_is_live(object) && | ||
417 | !test_bit(FSCACHE_IOERROR, &object->cache->flags); | ||
418 | } | 405 | } |
419 | 406 | ||
420 | static inline bool fscache_object_is_dead(struct fscache_object *object) | 407 | static inline bool fscache_object_is_active(struct fscache_object *object) |
421 | { | 408 | { |
422 | return fscache_object_is_dying(object) && | 409 | return fscache_object_is_available(object) && |
423 | test_bit(FSCACHE_IOERROR, &object->cache->flags); | 410 | fscache_object_is_live(object) && |
411 | !fscache_cache_is_broken(object); | ||
424 | } | 412 | } |
425 | 413 | ||
426 | /** | 414 | /** |
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | |||
551 | const void *data, | 539 | const void *data, |
552 | uint16_t datalen); | 540 | uint16_t datalen); |
553 | 541 | ||
542 | extern void fscache_object_retrying_stale(struct fscache_object *object); | ||
543 | |||
544 | enum fscache_why_object_killed { | ||
545 | FSCACHE_OBJECT_IS_STALE, | ||
546 | FSCACHE_OBJECT_NO_SPACE, | ||
547 | FSCACHE_OBJECT_WAS_RETIRED, | ||
548 | FSCACHE_OBJECT_WAS_CULLED, | ||
549 | }; | ||
550 | extern void fscache_object_mark_killed(struct fscache_object *object, | ||
551 | enum fscache_why_object_killed why); | ||
552 | |||
554 | #endif /* _LINUX_FSCACHE_CACHE_H */ | 553 | #endif /* _LINUX_FSCACHE_CACHE_H */ |