author    David Howells <dhowells@redhat.com>    2015-02-24 05:05:29 -0500
committer David Howells <dhowells@redhat.com>    2015-04-02 09:28:53 -0400
commit    4a47132ff472a0c2c5441baeb50cf97f2580bc43
tree      d6ef9c9d598ed89c3b503728f9ce2a8b3ce21e3d /fs/fscache
parent    d3b97ca4a99e4e6c78f5a21c968eadf5c8ba9971
FS-Cache: Retain the netfs context in the retrieval op earlier
Now that the retrieval operation may be disposed of by fscache_put_operation()
before we actually set the context, the retrieval-specific cleanup operation
can produce a NULL-pointer dereference when it tries to unconditionally clean
up the netfs context.

Given that it is expected that we'll get at least as far as the place where we
currently set the context pointer, and that it is unlikely we'll go through the
error handling paths prior to that point, retain the context right from the
point that the retrieval op is allocated.

Concomitant to this, we need to retain the cookie pointer in the retrieval op
as well, so that we can call the netfs to release its context in the release
method.

In addition, we might now get into fscache_release_retrieval_op() with the op
only initialised.  To this end, set the operation to DEAD only after the
release method has been called, and skip the n_pages test upon cleanup if the
op is still in the INITIALISED state.

Without these changes, the following oops might be seen:

    BUG: unable to handle kernel NULL pointer dereference at 00000000000000b8
    ...
    RIP: 0010:[<ffffffffa0089c98>] fscache_release_retrieval_op+0xae/0x100
    ...
    Call Trace:
     [<ffffffffa0088560>] fscache_put_operation+0x117/0x2e0
     [<ffffffffa008b8f5>] __fscache_read_or_alloc_pages+0x351/0x3ac
     [<ffffffffa00b761f>] __nfs_readpages_from_fscache+0x59/0xbf [nfs]
     [<ffffffffa00b06c5>] nfs_readpages+0x10c/0x185 [nfs]
     [<ffffffff81124925>] ? alloc_pages_current+0x119/0x13e
     [<ffffffff810ee5fd>] ? __page_cache_alloc+0xfb/0x10a
     [<ffffffff810f87f8>] __do_page_cache_readahead+0x188/0x22c
     [<ffffffff810f8b3a>] ondemand_readahead+0x29e/0x2af
     [<ffffffff810f8c92>] page_cache_sync_readahead+0x38/0x3a
     [<ffffffff810ef337>] generic_file_read_iter+0x1a2/0x55a
     [<ffffffffa00a9dff>] ? nfs_revalidate_mapping+0xd6/0x288 [nfs]
     [<ffffffffa00a6a23>] nfs_file_read+0x49/0x70 [nfs]
     [<ffffffff811363be>] new_sync_read+0x78/0x9c
     [<ffffffff81137164>] __vfs_read+0x13/0x38
     [<ffffffff8113721e>] vfs_read+0x95/0x121
     [<ffffffff811372f6>] SyS_read+0x4c/0x8a
     [<ffffffff81557a52>] system_call_fastpath+0x12/0x17

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Steve Dickson <steved@redhat.com>
Acked-by: Jeff Layton <jeff.layton@primarydata.com>
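For illustration only, here is a minimal userspace sketch of the lifetime rule
the patch enforces; it is not the kernel code, and the types and helper names
below are stand-ins for the fscache structures.  The point it models: the
netfs context is pinned as soon as the retrieval op is allocated, the release
callback must tolerate an op that never left the INITIALISED state, and the
op is marked DEAD only after the release callback has run.

/* Simplified model of the op/context lifetime (illustrative names only). */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum op_state { OP_INITIALISED, OP_COMPLETE, OP_CANCELLED, OP_DEAD };

struct context { int refcount; };

struct retrieval_op {
	enum op_state state;
	struct context *context;	/* pinned at allocation if non-NULL */
	int n_pages;
};

static void get_context(struct context *ctx) { ctx->refcount++; }
static void put_context(struct context *ctx) { ctx->refcount--; }

/* Analogue of fscache_alloc_retrieval(): pin the context up front. */
static struct retrieval_op *alloc_retrieval(struct context *ctx)
{
	struct retrieval_op *op = calloc(1, sizeof(*op));

	op->state = OP_INITIALISED;
	op->context = ctx;
	if (ctx)
		get_context(ctx);	/* held from the very start */
	return op;
}

/* Analogue of fscache_release_retrieval_op(): tolerate an early death. */
static void release_retrieval_op(struct retrieval_op *op)
{
	/* n_pages is only meaningful once the op has left INITIALISED */
	if (op->state != OP_INITIALISED)
		assert(op->n_pages == 0);
	if (op->context)
		put_context(op->context);
}

/* Analogue of fscache_put_operation(): mark DEAD only after release ran. */
static void put_operation(struct retrieval_op *op)
{
	release_retrieval_op(op);
	op->state = OP_DEAD;
	free(op);
}

int main(void)
{
	struct context ctx = { .refcount = 1 };

	/* Op allocated, then dropped before ever being submitted. */
	put_operation(alloc_retrieval(&ctx));
	printf("context refcount back to %d\n", ctx.refcount);	/* prints 1 */
	return 0;
}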
Diffstat (limited to 'fs/fscache')
-rw-r--r--  fs/fscache/operation.c   2
-rw-r--r--  fs/fscache/page.c       20
2 files changed, 11 insertions, 11 deletions
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 57d4abb68656..de67745e1cd7 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -492,7 +492,6 @@ void fscache_put_operation(struct fscache_operation *op)
 	ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
 		    op->state != FSCACHE_OP_ST_COMPLETE,
 		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
-	op->state = FSCACHE_OP_ST_DEAD;
 
 	fscache_stat(&fscache_n_op_release);
 
@@ -500,6 +499,7 @@ void fscache_put_operation(struct fscache_operation *op)
 		op->release(op);
 		op->release = NULL;
 	}
+	op->state = FSCACHE_OP_ST_DEAD;
 
 	object = op->object;
 	if (likely(object)) {
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index d1b23a67c031..483bbc613bf0 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -269,11 +269,12 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
 
 	_enter("{OP%x}", op->op.debug_id);
 
-	ASSERTCMP(atomic_read(&op->n_pages), ==, 0);
+	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
+		    atomic_read(&op->n_pages), ==, 0);
 
 	fscache_hist(fscache_retrieval_histogram, op->start_time);
 	if (op->context)
-		fscache_put_context(op->op.object->cookie, op->context);
+		fscache_put_context(op->cookie, op->context);
 
 	_leave("");
 }
@@ -302,11 +303,18 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 	op->op.flags	= FSCACHE_OP_MYTHREAD |
 			  (1UL << FSCACHE_OP_WAITING) |
 			  (1UL << FSCACHE_OP_UNUSE_COOKIE);
+	op->cookie	= cookie;
 	op->mapping	= mapping;
 	op->end_io_func	= end_io_func;
 	op->context	= context;
 	op->start_time	= jiffies;
 	INIT_LIST_HEAD(&op->to_do);
+
+	/* Pin the netfs read context in case we need to do the actual netfs
+	 * read because we've encountered a cache read failure.
+	 */
+	if (context)
+		fscache_get_context(op->cookie, context);
 	return op;
 }
 
@@ -456,10 +464,6 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
 	fscache_stat(&fscache_n_retrieval_ops);
 
-	/* pin the netfs read context in case we need to do the actual netfs
-	 * read because we've encountered a cache read failure */
-	fscache_get_context(object->cookie, op->context);
-
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
 	ret = fscache_wait_for_operation_activation(
@@ -586,10 +590,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
 	fscache_stat(&fscache_n_retrieval_ops);
 
-	/* pin the netfs read context in case we need to do the actual netfs
-	 * read because we've encountered a cache read failure */
-	fscache_get_context(object->cookie, op->context);
-
 	/* we wait for the operation to become active, and then process it
 	 * *here*, in this thread, and not in the thread pool */
 	ret = fscache_wait_for_operation_activation(