aboutsummaryrefslogtreecommitdiffstats
path: root/fs/fscache/page.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/fscache/page.c')
-rw-r--r--fs/fscache/page.c195
1 files changed, 158 insertions, 37 deletions
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 3f7a59bfa7ad..ff000e52072d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -56,6 +56,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
56 56
57 _enter("%p,%p,%x", cookie, page, gfp); 57 _enter("%p,%p,%x", cookie, page, gfp);
58 58
59try_again:
59 rcu_read_lock(); 60 rcu_read_lock();
60 val = radix_tree_lookup(&cookie->stores, page->index); 61 val = radix_tree_lookup(&cookie->stores, page->index);
61 if (!val) { 62 if (!val) {
@@ -104,11 +105,19 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
104 return true; 105 return true;
105 106
106page_busy: 107page_busy:
107 /* we might want to wait here, but that could deadlock the allocator as 108 /* We will wait here if we're allowed to, but that could deadlock the
108 * the work threads writing to the cache may all end up sleeping 109 * allocator as the work threads writing to the cache may all end up
109 * on memory allocation */ 110 * sleeping on memory allocation, so we may need to impose a timeout
110 fscache_stat(&fscache_n_store_vmscan_busy); 111 * too. */
111 return false; 112 if (!(gfp & __GFP_WAIT)) {
113 fscache_stat(&fscache_n_store_vmscan_busy);
114 return false;
115 }
116
117 fscache_stat(&fscache_n_store_vmscan_wait);
118 __fscache_wait_on_page_write(cookie, page);
119 gfp &= ~__GFP_WAIT;
120 goto try_again;
112} 121}
113EXPORT_SYMBOL(__fscache_maybe_release_page); 122EXPORT_SYMBOL(__fscache_maybe_release_page);
114 123
@@ -162,6 +171,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
162 fscache_abort_object(object); 171 fscache_abort_object(object);
163 } 172 }
164 173
174 fscache_op_complete(op, true);
165 _leave(""); 175 _leave("");
166} 176}
167 177
@@ -223,6 +233,8 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
223 233
224 _enter("{OP%x}", op->op.debug_id); 234 _enter("{OP%x}", op->op.debug_id);
225 235
236 ASSERTCMP(op->n_pages, ==, 0);
237
226 fscache_hist(fscache_retrieval_histogram, op->start_time); 238 fscache_hist(fscache_retrieval_histogram, op->start_time);
227 if (op->context) 239 if (op->context)
228 fscache_put_context(op->op.object->cookie, op->context); 240 fscache_put_context(op->op.object->cookie, op->context);
@@ -291,6 +303,17 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
291} 303}
292 304
293/* 305/*
306 * Handle cancellation of a pending retrieval op
307 */
308static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
309{
310 struct fscache_retrieval *op =
311 container_of(_op, struct fscache_retrieval, op);
312
313 op->n_pages = 0;
314}
315
316/*
294 * wait for an object to become active (or dead) 317 * wait for an object to become active (or dead)
295 */ 318 */
296static int fscache_wait_for_retrieval_activation(struct fscache_object *object, 319static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
@@ -307,8 +330,8 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
307 fscache_stat(stat_op_waits); 330 fscache_stat(stat_op_waits);
308 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 331 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
309 fscache_wait_bit_interruptible, 332 fscache_wait_bit_interruptible,
310 TASK_INTERRUPTIBLE) < 0) { 333 TASK_INTERRUPTIBLE) != 0) {
311 ret = fscache_cancel_op(&op->op); 334 ret = fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
312 if (ret == 0) 335 if (ret == 0)
313 return -ERESTARTSYS; 336 return -ERESTARTSYS;
314 337
@@ -320,7 +343,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
320 _debug("<<< GO"); 343 _debug("<<< GO");
321 344
322check_if_dead: 345check_if_dead:
346 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
347 fscache_stat(stat_object_dead);
348 _leave(" = -ENOBUFS [cancelled]");
349 return -ENOBUFS;
350 }
323 if (unlikely(fscache_object_is_dead(object))) { 351 if (unlikely(fscache_object_is_dead(object))) {
352 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
353 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
324 fscache_stat(stat_object_dead); 354 fscache_stat(stat_object_dead);
325 return -ENOBUFS; 355 return -ENOBUFS;
326 } 356 }
@@ -353,6 +383,11 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
353 if (hlist_empty(&cookie->backing_objects)) 383 if (hlist_empty(&cookie->backing_objects))
354 goto nobufs; 384 goto nobufs;
355 385
386 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
387 _leave(" = -ENOBUFS [invalidating]");
388 return -ENOBUFS;
389 }
390
356 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); 391 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
357 ASSERTCMP(page, !=, NULL); 392 ASSERTCMP(page, !=, NULL);
358 393
@@ -364,6 +399,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
364 _leave(" = -ENOMEM"); 399 _leave(" = -ENOMEM");
365 return -ENOMEM; 400 return -ENOMEM;
366 } 401 }
402 op->n_pages = 1;
367 403
368 spin_lock(&cookie->lock); 404 spin_lock(&cookie->lock);
369 405
@@ -375,10 +411,10 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
375 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); 411 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
376 412
377 atomic_inc(&object->n_reads); 413 atomic_inc(&object->n_reads);
378 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); 414 __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
379 415
380 if (fscache_submit_op(object, &op->op) < 0) 416 if (fscache_submit_op(object, &op->op) < 0)
381 goto nobufs_unlock; 417 goto nobufs_unlock_dec;
382 spin_unlock(&cookie->lock); 418 spin_unlock(&cookie->lock);
383 419
384 fscache_stat(&fscache_n_retrieval_ops); 420 fscache_stat(&fscache_n_retrieval_ops);
@@ -425,6 +461,8 @@ error:
425 _leave(" = %d", ret); 461 _leave(" = %d", ret);
426 return ret; 462 return ret;
427 463
464nobufs_unlock_dec:
465 atomic_dec(&object->n_reads);
428nobufs_unlock: 466nobufs_unlock:
429 spin_unlock(&cookie->lock); 467 spin_unlock(&cookie->lock);
430 kfree(op); 468 kfree(op);
@@ -472,6 +510,11 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
472 if (hlist_empty(&cookie->backing_objects)) 510 if (hlist_empty(&cookie->backing_objects))
473 goto nobufs; 511 goto nobufs;
474 512
513 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
514 _leave(" = -ENOBUFS [invalidating]");
515 return -ENOBUFS;
516 }
517
475 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); 518 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
476 ASSERTCMP(*nr_pages, >, 0); 519 ASSERTCMP(*nr_pages, >, 0);
477 ASSERT(!list_empty(pages)); 520 ASSERT(!list_empty(pages));
@@ -482,6 +525,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
482 op = fscache_alloc_retrieval(mapping, end_io_func, context); 525 op = fscache_alloc_retrieval(mapping, end_io_func, context);
483 if (!op) 526 if (!op)
484 return -ENOMEM; 527 return -ENOMEM;
528 op->n_pages = *nr_pages;
485 529
486 spin_lock(&cookie->lock); 530 spin_lock(&cookie->lock);
487 531
@@ -491,10 +535,10 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
491 struct fscache_object, cookie_link); 535 struct fscache_object, cookie_link);
492 536
493 atomic_inc(&object->n_reads); 537 atomic_inc(&object->n_reads);
494 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); 538 __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
495 539
496 if (fscache_submit_op(object, &op->op) < 0) 540 if (fscache_submit_op(object, &op->op) < 0)
497 goto nobufs_unlock; 541 goto nobufs_unlock_dec;
498 spin_unlock(&cookie->lock); 542 spin_unlock(&cookie->lock);
499 543
500 fscache_stat(&fscache_n_retrieval_ops); 544 fscache_stat(&fscache_n_retrieval_ops);
@@ -541,6 +585,8 @@ error:
541 _leave(" = %d", ret); 585 _leave(" = %d", ret);
542 return ret; 586 return ret;
543 587
588nobufs_unlock_dec:
589 atomic_dec(&object->n_reads);
544nobufs_unlock: 590nobufs_unlock:
545 spin_unlock(&cookie->lock); 591 spin_unlock(&cookie->lock);
546 kfree(op); 592 kfree(op);
@@ -577,12 +623,18 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
577 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); 623 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
578 ASSERTCMP(page, !=, NULL); 624 ASSERTCMP(page, !=, NULL);
579 625
626 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
627 _leave(" = -ENOBUFS [invalidating]");
628 return -ENOBUFS;
629 }
630
580 if (fscache_wait_for_deferred_lookup(cookie) < 0) 631 if (fscache_wait_for_deferred_lookup(cookie) < 0)
581 return -ERESTARTSYS; 632 return -ERESTARTSYS;
582 633
583 op = fscache_alloc_retrieval(page->mapping, NULL, NULL); 634 op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
584 if (!op) 635 if (!op)
585 return -ENOMEM; 636 return -ENOMEM;
637 op->n_pages = 1;
586 638
587 spin_lock(&cookie->lock); 639 spin_lock(&cookie->lock);
588 640
@@ -658,9 +710,27 @@ static void fscache_write_op(struct fscache_operation *_op)
658 spin_lock(&object->lock); 710 spin_lock(&object->lock);
659 cookie = object->cookie; 711 cookie = object->cookie;
660 712
661 if (!fscache_object_is_active(object) || !cookie) { 713 if (!fscache_object_is_active(object)) {
714 /* If we get here, then the on-disk cache object likely no longer
715 * exists, so we should just cancel this write operation.
716 */
717 spin_unlock(&object->lock);
718 fscache_op_complete(&op->op, false);
719 _leave(" [inactive]");
720 return;
721 }
722
723 if (!cookie) {
724 /* If we get here, then the cookie belonging to the object was
725 * detached, probably by the cookie being withdrawn due to
726 * memory pressure, which means that the pages we might write
727 * to the cache from no longer exist - therefore, we can just
728 * cancel this write operation.
729 */
662 spin_unlock(&object->lock); 730 spin_unlock(&object->lock);
663 _leave(""); 731 fscache_op_complete(&op->op, false);
732 _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}",
733 _op->flags, _op->state, object->state, object->flags);
664 return; 734 return;
665 } 735 }
666 736
@@ -696,6 +766,7 @@ static void fscache_write_op(struct fscache_operation *_op)
696 fscache_end_page_write(object, page); 766 fscache_end_page_write(object, page);
697 if (ret < 0) { 767 if (ret < 0) {
698 fscache_abort_object(object); 768 fscache_abort_object(object);
769 fscache_op_complete(&op->op, true);
699 } else { 770 } else {
700 fscache_enqueue_operation(&op->op); 771 fscache_enqueue_operation(&op->op);
701 } 772 }
@@ -710,6 +781,38 @@ superseded:
710 spin_unlock(&cookie->stores_lock); 781 spin_unlock(&cookie->stores_lock);
711 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); 782 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
712 spin_unlock(&object->lock); 783 spin_unlock(&object->lock);
784 fscache_op_complete(&op->op, true);
785 _leave("");
786}
787
788/*
789 * Clear the pages pending writing for invalidation
790 */
791void fscache_invalidate_writes(struct fscache_cookie *cookie)
792{
793 struct page *page;
794 void *results[16];
795 int n, i;
796
797 _enter("");
798
799 while (spin_lock(&cookie->stores_lock),
800 n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
801 ARRAY_SIZE(results),
802 FSCACHE_COOKIE_PENDING_TAG),
803 n > 0) {
804 for (i = n - 1; i >= 0; i--) {
805 page = results[i];
806 radix_tree_delete(&cookie->stores, page->index);
807 }
808
809 spin_unlock(&cookie->stores_lock);
810
811 for (i = n - 1; i >= 0; i--)
812 page_cache_release(results[i]);
813 }
814
815 spin_unlock(&cookie->stores_lock);
713 _leave(""); 816 _leave("");
714} 817}
715 818
@@ -759,7 +862,12 @@ int __fscache_write_page(struct fscache_cookie *cookie,
759 862
760 fscache_stat(&fscache_n_stores); 863 fscache_stat(&fscache_n_stores);
761 864
762 op = kzalloc(sizeof(*op), GFP_NOIO); 865 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
866 _leave(" = -ENOBUFS [invalidating]");
867 return -ENOBUFS;
868 }
869
870 op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
763 if (!op) 871 if (!op)
764 goto nomem; 872 goto nomem;
765 873
@@ -915,6 +1023,40 @@ done:
915EXPORT_SYMBOL(__fscache_uncache_page); 1023EXPORT_SYMBOL(__fscache_uncache_page);
916 1024
917/** 1025/**
1026 * fscache_mark_page_cached - Mark a page as being cached
1027 * @op: The retrieval op pages are being marked for
1028 * @page: The page to be marked
1029 *
1030 * Mark a netfs page as being cached. After this is called, the netfs
1031 * must call fscache_uncache_page() to remove the mark.
1032 */
1033void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
1034{
1035 struct fscache_cookie *cookie = op->op.object->cookie;
1036
1037#ifdef CONFIG_FSCACHE_STATS
1038 atomic_inc(&fscache_n_marks);
1039#endif
1040
1041 _debug("- mark %p{%lx}", page, page->index);
1042 if (TestSetPageFsCache(page)) {
1043 static bool once_only;
1044 if (!once_only) {
1045 once_only = true;
1046 printk(KERN_WARNING "FS-Cache:"
1047 " Cookie type %s marked page %lx"
1048 " multiple times\n",
1049 cookie->def->name, page->index);
1050 }
1051 }
1052
1053 if (cookie->def->mark_page_cached)
1054 cookie->def->mark_page_cached(cookie->netfs_data,
1055 op->mapping, page);
1056}
1057EXPORT_SYMBOL(fscache_mark_page_cached);
1058
1059/**
918 * fscache_mark_pages_cached - Mark pages as being cached 1060 * fscache_mark_pages_cached - Mark pages as being cached
919 * @op: The retrieval op pages are being marked for 1061 * @op: The retrieval op pages are being marked for
920 * @pagevec: The pages to be marked 1062 * @pagevec: The pages to be marked
@@ -925,32 +1067,11 @@ EXPORT_SYMBOL(__fscache_uncache_page);
925void fscache_mark_pages_cached(struct fscache_retrieval *op, 1067void fscache_mark_pages_cached(struct fscache_retrieval *op,
926 struct pagevec *pagevec) 1068 struct pagevec *pagevec)
927{ 1069{
928 struct fscache_cookie *cookie = op->op.object->cookie;
929 unsigned long loop; 1070 unsigned long loop;
930 1071
931#ifdef CONFIG_FSCACHE_STATS 1072 for (loop = 0; loop < pagevec->nr; loop++)
932 atomic_add(pagevec->nr, &fscache_n_marks); 1073 fscache_mark_page_cached(op, pagevec->pages[loop]);
933#endif
934
935 for (loop = 0; loop < pagevec->nr; loop++) {
936 struct page *page = pagevec->pages[loop];
937
938 _debug("- mark %p{%lx}", page, page->index);
939 if (TestSetPageFsCache(page)) {
940 static bool once_only;
941 if (!once_only) {
942 once_only = true;
943 printk(KERN_WARNING "FS-Cache:"
944 " Cookie type %s marked page %lx"
945 " multiple times\n",
946 cookie->def->name, page->index);
947 }
948 }
949 }
950 1074
951 if (cookie->def->mark_pages_cached)
952 cookie->def->mark_pages_cached(cookie->netfs_data,
953 op->mapping, pagevec);
954 pagevec_reinit(pagevec); 1075 pagevec_reinit(pagevec);
955} 1076}
956EXPORT_SYMBOL(fscache_mark_pages_cached); 1077EXPORT_SYMBOL(fscache_mark_pages_cached);