path: root/fs/cachefiles
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 21:14:31 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 21:14:31 -0500
commit     1f0377ff088ed2971c57debc9b0c3b846ec431fd (patch)
tree       cd53c8981269e57e38e4285abd71cc990e1cfc67 /fs/cachefiles
parent     54d46ea993744c5408e39ce0cb4851e13cbea716 (diff)
parent     b729d75d19777a5dd34672020516eada43ff026f (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull VFS update from Al Viro:
 "fscache fixes, ESTALE patchset, vmtruncate removal series, assorted
  misc stuff."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (79 commits)
  vfs: make lremovexattr retry once on ESTALE error
  vfs: make removexattr retry once on ESTALE
  vfs: make llistxattr retry once on ESTALE error
  vfs: make listxattr retry once on ESTALE error
  vfs: make lgetxattr retry once on ESTALE
  vfs: make getxattr retry once on an ESTALE error
  vfs: allow lsetxattr() to retry once on ESTALE errors
  vfs: allow setxattr to retry once on ESTALE errors
  vfs: allow utimensat() calls to retry once on an ESTALE error
  vfs: fix user_statfs to retry once on ESTALE errors
  vfs: make fchownat retry once on ESTALE errors
  vfs: make fchmodat retry once on ESTALE errors
  vfs: have chroot retry once on ESTALE error
  vfs: have chdir retry lookup and call once on ESTALE error
  vfs: have faccessat retry once on an ESTALE error
  vfs: have do_sys_truncate retry once on an ESTALE error
  vfs: fix renameat to retry on ESTALE errors
  vfs: make do_unlinkat retry once on ESTALE errors
  vfs: make do_rmdir retry once on ESTALE errors
  vfs: add a flags argument to user_path_parent
  ...
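As a rough sketch only (not code from this merge), the retry-once-on-ESTALE conversions listed above share a common shape: do the path lookup and the operation, and if the result is ESTALE on the first pass, repeat the lookup with LOOKUP_REVAL so every dentry is revalidated against the server. The sketch assumes the retry_estale() helper introduced by that series; sys_frobnicate() and do_frobnicate() are hypothetical stand-ins for a converted syscall and its per-call work.

	#include <linux/fs.h>
	#include <linux/namei.h>
	#include <linux/syscalls.h>

	SYSCALL_DEFINE1(frobnicate, const char __user *, pathname)
	{
		unsigned int lookup_flags = LOOKUP_FOLLOW;
		struct path path;
		int error;

	retry:
		error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
		if (error)
			return error;

		error = do_frobnicate(&path);	/* hypothetical per-syscall work */
		path_put(&path);

		if (retry_estale(error, lookup_flags)) {
			/* second (and last) attempt: force dentry revalidation */
			lookup_flags |= LOOKUP_REVAL;
			goto retry;
		}
		return error;
	}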
Diffstat (limited to 'fs/cachefiles')
-rw-r--r--  fs/cachefiles/interface.c  |  57
-rw-r--r--  fs/cachefiles/internal.h   |   2
-rw-r--r--  fs/cachefiles/key.c        |   2
-rw-r--r--  fs/cachefiles/namei.c      |   3
-rw-r--r--  fs/cachefiles/rdwr.c       | 114
-rw-r--r--  fs/cachefiles/xattr.c      |   2
6 files changed, 125 insertions, 55 deletions
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 67bef6d01484..746ce532e130 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -41,12 +41,12 @@ static struct fscache_object *cachefiles_alloc_object(
 
 	_enter("{%s},%p,", cache->cache.identifier, cookie);
 
-	lookup_data = kmalloc(sizeof(*lookup_data), GFP_KERNEL);
+	lookup_data = kmalloc(sizeof(*lookup_data), cachefiles_gfp);
 	if (!lookup_data)
 		goto nomem_lookup_data;
 
 	/* create a new object record and a temporary leaf image */
-	object = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
+	object = kmem_cache_alloc(cachefiles_object_jar, cachefiles_gfp);
 	if (!object)
 		goto nomem_object;
 
@@ -63,7 +63,7 @@ static struct fscache_object *cachefiles_alloc_object(
 	 * - stick the length on the front and leave space on the back for the
 	 *   encoder
 	 */
-	buffer = kmalloc((2 + 512) + 3, GFP_KERNEL);
+	buffer = kmalloc((2 + 512) + 3, cachefiles_gfp);
 	if (!buffer)
 		goto nomem_buffer;
 
@@ -219,7 +219,7 @@ static void cachefiles_update_object(struct fscache_object *_object)
 		return;
 	}
 
-	auxdata = kmalloc(2 + 512 + 3, GFP_KERNEL);
+	auxdata = kmalloc(2 + 512 + 3, cachefiles_gfp);
 	if (!auxdata) {
 		_leave(" [nomem]");
 		return;
@@ -441,6 +441,54 @@ truncate_failed:
 }
 
 /*
+ * Invalidate an object
+ */
+static void cachefiles_invalidate_object(struct fscache_operation *op)
+{
+	struct cachefiles_object *object;
+	struct cachefiles_cache *cache;
+	const struct cred *saved_cred;
+	struct path path;
+	uint64_t ni_size;
+	int ret;
+
+	object = container_of(op->object, struct cachefiles_object, fscache);
+	cache = container_of(object->fscache.cache,
+			     struct cachefiles_cache, cache);
+
+	op->object->cookie->def->get_attr(op->object->cookie->netfs_data,
+					  &ni_size);
+
+	_enter("{OBJ%x},[%llu]",
+	       op->object->debug_id, (unsigned long long)ni_size);
+
+	if (object->backer) {
+		ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+
+		fscache_set_store_limit(&object->fscache, ni_size);
+
+		path.dentry = object->backer;
+		path.mnt = cache->mnt;
+
+		cachefiles_begin_secure(cache, &saved_cred);
+		ret = vfs_truncate(&path, 0);
+		if (ret == 0)
+			ret = vfs_truncate(&path, ni_size);
+		cachefiles_end_secure(cache, saved_cred);
+
+		if (ret != 0) {
+			fscache_set_store_limit(&object->fscache, 0);
+			if (ret == -EIO)
+				cachefiles_io_error_obj(object,
+							"Invalidate failed");
+		}
+	}
+
+	fscache_op_complete(op, true);
+	_leave("");
+}
+
+/*
  * dissociate a cache from all the pages it was backing
  */
 static void cachefiles_dissociate_pages(struct fscache_cache *cache)
@@ -455,6 +503,7 @@ const struct fscache_cache_ops cachefiles_cache_ops = {
 	.lookup_complete	= cachefiles_lookup_complete,
 	.grab_object		= cachefiles_grab_object,
 	.update_object		= cachefiles_update_object,
+	.invalidate_object	= cachefiles_invalidate_object,
 	.drop_object		= cachefiles_drop_object,
 	.put_object		= cachefiles_put_object,
 	.sync_cache		= cachefiles_sync_cache,
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index bd6bc1bde2d7..49382519907a 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -23,6 +23,8 @@ extern unsigned cachefiles_debug;
 #define CACHEFILES_DEBUG_KLEAVE	2
 #define CACHEFILES_DEBUG_KDEBUG	4
 
+#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+
 /*
  * node records
  */
diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c
index 81b8b2b3a674..33b58c60f2d1 100644
--- a/fs/cachefiles/key.c
+++ b/fs/cachefiles/key.c
@@ -78,7 +78,7 @@ char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
 
 	_debug("max: %d", max);
 
-	key = kmalloc(max, GFP_KERNEL);
+	key = kmalloc(max, cachefiles_gfp);
 	if (!key)
 		return NULL;
 
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index b0b5f7cdfffa..8c01c5fcdf75 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -40,8 +40,7 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
 	printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
 	       prefix, fscache_object_states[object->fscache.state],
 	       object->fscache.flags, work_busy(&object->fscache.work),
-	       object->fscache.events,
-	       object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
+	       object->fscache.events, object->fscache.event_mask);
 	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
 	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
 	       object->fscache.n_exclusive);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c994691d9445..480992259707 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -77,25 +77,25 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
 	struct page *backpage = monitor->back_page, *backpage2;
 	int ret;
 
-	kenter("{ino=%lx},{%lx,%lx}",
+	_enter("{ino=%lx},{%lx,%lx}",
 	       object->backer->d_inode->i_ino,
 	       backpage->index, backpage->flags);
 
 	/* skip if the page was truncated away completely */
 	if (backpage->mapping != bmapping) {
-		kleave(" = -ENODATA [mapping]");
+		_leave(" = -ENODATA [mapping]");
 		return -ENODATA;
 	}
 
 	backpage2 = find_get_page(bmapping, backpage->index);
 	if (!backpage2) {
-		kleave(" = -ENODATA [gone]");
+		_leave(" = -ENODATA [gone]");
 		return -ENODATA;
 	}
 
 	if (backpage != backpage2) {
 		put_page(backpage2);
-		kleave(" = -ENODATA [different]");
+		_leave(" = -ENODATA [different]");
 		return -ENODATA;
 	}
 
@@ -114,7 +114,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
 	if (PageUptodate(backpage))
 		goto unlock_discard;
 
-	kdebug("reissue read");
+	_debug("reissue read");
 	ret = bmapping->a_ops->readpage(NULL, backpage);
 	if (ret < 0)
 		goto unlock_discard;
@@ -129,7 +129,7 @@ static int cachefiles_read_reissue(struct cachefiles_object *object,
 	}
 
 	/* it'll reappear on the todo list */
-	kleave(" = -EINPROGRESS");
+	_leave(" = -EINPROGRESS");
 	return -EINPROGRESS;
 
 unlock_discard:
@@ -137,7 +137,7 @@ unlock_discard:
 	spin_lock_irq(&object->work_lock);
 	list_del(&monitor->op_link);
 	spin_unlock_irq(&object->work_lock);
-	kleave(" = %d", ret);
+	_leave(" = %d", ret);
 	return ret;
 }
 
@@ -174,11 +174,13 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 		_debug("- copy {%lu}", monitor->back_page->index);
 
 	recheck:
-		if (PageUptodate(monitor->back_page)) {
+		if (test_bit(FSCACHE_COOKIE_INVALIDATING,
+			     &object->fscache.cookie->flags)) {
+			error = -ESTALE;
+		} else if (PageUptodate(monitor->back_page)) {
 			copy_highpage(monitor->netfs_page, monitor->back_page);
-
-			pagevec_add(&pagevec, monitor->netfs_page);
-			fscache_mark_pages_cached(monitor->op, &pagevec);
+			fscache_mark_page_cached(monitor->op,
+						 monitor->netfs_page);
 			error = 0;
 		} else if (!PageError(monitor->back_page)) {
 			/* the page has probably been truncated */
@@ -198,6 +200,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 
 		fscache_end_io(op, monitor->netfs_page, error);
 		page_cache_release(monitor->netfs_page);
+		fscache_retrieval_complete(op, 1);
 		fscache_put_retrieval(op);
 		kfree(monitor);
 
@@ -239,7 +242,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
 	_debug("read back %p{%lu,%d}",
 	       netpage, netpage->index, page_count(netpage));
 
-	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
+	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
 	if (!monitor)
 		goto nomem;
 
@@ -258,13 +261,14 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
 		goto backing_page_already_present;
 
 	if (!newpage) {
-		newpage = page_cache_alloc_cold(bmapping);
+		newpage = __page_cache_alloc(cachefiles_gfp |
+					     __GFP_COLD);
 		if (!newpage)
 			goto nomem_monitor;
 	}
 
 	ret = add_to_page_cache(newpage, bmapping,
-				netpage->index, GFP_KERNEL);
+				netpage->index, cachefiles_gfp);
 	if (ret == 0)
 		goto installed_new_backing_page;
 	if (ret != -EEXIST)
@@ -335,11 +339,11 @@ backing_page_already_present:
 backing_page_already_uptodate:
 	_debug("- uptodate");
 
-	pagevec_add(pagevec, netpage);
-	fscache_mark_pages_cached(op, pagevec);
+	fscache_mark_page_cached(op, netpage);
 
 	copy_highpage(netpage, backpage);
 	fscache_end_io(op, netpage, 0);
+	fscache_retrieval_complete(op, 1);
 
 success:
 	_debug("success");
@@ -357,10 +361,13 @@ out:
 
 read_error:
 	_debug("read error %d", ret);
-	if (ret == -ENOMEM)
+	if (ret == -ENOMEM) {
+		fscache_retrieval_complete(op, 1);
 		goto out;
+	}
 io_error:
 	cachefiles_io_error_obj(object, "Page read error on backing file");
+	fscache_retrieval_complete(op, 1);
 	ret = -ENOBUFS;
 	goto out;
 
@@ -370,6 +377,7 @@ nomem_monitor:
 	fscache_put_retrieval(monitor->op);
 	kfree(monitor);
 nomem:
+	fscache_retrieval_complete(op, 1);
 	_leave(" = -ENOMEM");
 	return -ENOMEM;
 }
@@ -408,7 +416,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 	_enter("{%p},{%lx},,,", object, page->index);
 
 	if (!object->backer)
-		return -ENOBUFS;
+		goto enobufs;
 
 	inode = object->backer->d_inode;
 	ASSERT(S_ISREG(inode->i_mode));
@@ -417,7 +425,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
 	/* calculate the shift required to use bmap */
 	if (inode->i_sb->s_blocksize > PAGE_SIZE)
-		return -ENOBUFS;
+		goto enobufs;
 
 	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
@@ -448,15 +456,20 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 						       &pagevec);
 	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
 		/* there's space in the cache we can use */
-		pagevec_add(&pagevec, page);
-		fscache_mark_pages_cached(op, &pagevec);
+		fscache_mark_page_cached(op, page);
+		fscache_retrieval_complete(op, 1);
 		ret = -ENODATA;
 	} else {
-		ret = -ENOBUFS;
+		goto enobufs;
 	}
 
 	_leave(" = %d", ret);
 	return ret;
+
+enobufs:
+	fscache_retrieval_complete(op, 1);
+	_leave(" = -ENOBUFS");
+	return -ENOBUFS;
 }
 
 /*
@@ -465,8 +478,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
  */
 static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					struct fscache_retrieval *op,
-					struct list_head *list,
-					struct pagevec *mark_pvec)
+					struct list_head *list)
 {
 	struct cachefiles_one_read *monitor = NULL;
 	struct address_space *bmapping = object->backer->d_inode->i_mapping;
@@ -485,7 +497,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		       netpage, netpage->index, page_count(netpage));
 
 		if (!monitor) {
-			monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
+			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
 			if (!monitor)
 				goto nomem;
 
@@ -500,13 +512,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 			goto backing_page_already_present;
 
 		if (!newpage) {
-			newpage = page_cache_alloc_cold(bmapping);
+			newpage = __page_cache_alloc(cachefiles_gfp |
+						     __GFP_COLD);
 			if (!newpage)
 				goto nomem;
 		}
 
 		ret = add_to_page_cache(newpage, bmapping,
-					netpage->index, GFP_KERNEL);
+					netpage->index, cachefiles_gfp);
 		if (ret == 0)
 			goto installed_new_backing_page;
 		if (ret != -EEXIST)
@@ -536,10 +549,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		_debug("- monitor add");
 
 		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
-					GFP_KERNEL);
+					cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
 				page_cache_release(netpage);
+				fscache_retrieval_complete(op, 1);
 				continue;
 			}
 			goto nomem;
@@ -612,10 +626,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		_debug("- uptodate");
 
 		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
-					GFP_KERNEL);
+					cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
 				page_cache_release(netpage);
+				fscache_retrieval_complete(op, 1);
 				continue;
 			}
 			goto nomem;
@@ -626,16 +641,17 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 		page_cache_release(backpage);
 		backpage = NULL;
 
-		if (!pagevec_add(mark_pvec, netpage))
-			fscache_mark_pages_cached(op, mark_pvec);
+		fscache_mark_page_cached(op, netpage);
 
 		page_cache_get(netpage);
 		if (!pagevec_add(&lru_pvec, netpage))
 			__pagevec_lru_add_file(&lru_pvec);
 
+		/* the netpage is unlocked and marked up to date here */
 		fscache_end_io(op, netpage, 0);
 		page_cache_release(netpage);
 		netpage = NULL;
+		fscache_retrieval_complete(op, 1);
 		continue;
 	}
 
@@ -661,6 +677,7 @@ out:
 	list_for_each_entry_safe(netpage, _n, list, lru) {
 		list_del(&netpage->lru);
 		page_cache_release(netpage);
+		fscache_retrieval_complete(op, 1);
 	}
 
 	_leave(" = %d", ret);
@@ -669,15 +686,17 @@ out:
 nomem:
 	_debug("nomem");
 	ret = -ENOMEM;
-	goto out;
+	goto record_page_complete;
 
 read_error:
 	_debug("read error %d", ret);
 	if (ret == -ENOMEM)
-		goto out;
+		goto record_page_complete;
 io_error:
 	cachefiles_io_error_obj(object, "Page read error on backing file");
 	ret = -ENOBUFS;
+record_page_complete:
+	fscache_retrieval_complete(op, 1);
 	goto out;
 }
 
@@ -709,7 +728,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 	       *nr_pages);
 
 	if (!object->backer)
-		return -ENOBUFS;
+		goto all_enobufs;
 
 	space = 1;
 	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
@@ -722,7 +741,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
 	/* calculate the shift required to use bmap */
 	if (inode->i_sb->s_blocksize > PAGE_SIZE)
-		return -ENOBUFS;
+		goto all_enobufs;
 
 	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
@@ -762,7 +781,10 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 			nrbackpages++;
 		} else if (space && pagevec_add(&pagevec, page) == 0) {
 			fscache_mark_pages_cached(op, &pagevec);
+			fscache_retrieval_complete(op, 1);
 			ret = -ENODATA;
+		} else {
+			fscache_retrieval_complete(op, 1);
 		}
 	}
 
@@ -775,18 +797,18 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 	/* submit the apparently valid pages to the backing fs to be read from
 	 * disk */
 	if (nrbackpages > 0) {
-		ret2 = cachefiles_read_backing_file(object, op, &backpages,
-						    &pagevec);
+		ret2 = cachefiles_read_backing_file(object, op, &backpages);
 		if (ret2 == -ENOMEM || ret2 == -EINTR)
 			ret = ret2;
 	}
 
-	if (pagevec_count(&pagevec) > 0)
-		fscache_mark_pages_cached(op, &pagevec);
-
 	_leave(" = %d [nr=%u%s]",
 	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
 	return ret;
+
+all_enobufs:
+	fscache_retrieval_complete(op, *nr_pages);
+	return -ENOBUFS;
 }
 
 /*
@@ -806,7 +828,6 @@ int cachefiles_allocate_page(struct fscache_retrieval *op,
 {
 	struct cachefiles_object *object;
 	struct cachefiles_cache *cache;
-	struct pagevec pagevec;
 	int ret;
 
 	object = container_of(op->op.object,
@@ -817,14 +838,12 @@ int cachefiles_allocate_page(struct fscache_retrieval *op,
 	_enter("%p,{%lx},", object, page->index);
 
 	ret = cachefiles_has_space(cache, 0, 1);
-	if (ret == 0) {
-		pagevec_init(&pagevec, 0);
-		pagevec_add(&pagevec, page);
-		fscache_mark_pages_cached(op, &pagevec);
-	} else {
+	if (ret == 0)
+		fscache_mark_page_cached(op, page);
+	else
 		ret = -ENOBUFS;
-	}
 
+	fscache_retrieval_complete(op, 1);
 	_leave(" = %d", ret);
 	return ret;
 }
@@ -874,6 +893,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op,
 		ret = -ENOBUFS;
 	}
 
+	fscache_retrieval_complete(op, *nr_pages);
 	_leave(" = %d", ret);
 	return ret;
 }
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index e18b183b47e1..73b46288b54b 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -174,7 +174,7 @@ int cachefiles_check_object_xattr(struct cachefiles_object *object,
 	ASSERT(dentry);
 	ASSERT(dentry->d_inode);
 
-	auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
+	auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, cachefiles_gfp);
 	if (!auxbuf) {
 		_leave(" = -ENOMEM");
 		return -ENOMEM;