about summary refs log tree commit diff stats
path: root/fs/cifs/file.c
diff options
context:
space:
mode:
author: Jeff Layton <jlayton@redhat.com> 2012-05-16 07:13:17 -0400
committer: Steve French <sfrench@us.ibm.com> 2012-05-16 21:13:31 -0400
commit 1c89254926c0643b99541d422c909762479aeef8 (patch)
tree f1124d07f10359eaf1ed51ba93c7e183b057cdba /fs/cifs/file.c
parent 2a1bb13853300bbb5a58eab006189d2c0dc215a0 (diff)
cifs: convert cifs_iovec_read to use async reads
Convert cifs_iovec_read to use async I/O. This also raises the limit on the rsize for uncached reads.

We first allocate a set of pages to hold the replies, then issue the reads in parallel and then collect the replies and copy the results into the iovec.

A possible future optimization would be to kmap and inline the iovec buffers and read the data directly from the socket into that. That would require some rather complex conversion of the iovec into a kvec, however.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--fs/cifs/file.c294
1 file changed, 234 insertions(+), 60 deletions(-)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d2a4259408e6..4b5fe398cbf6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2348,6 +2348,8 @@ cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
2348 sizeof(struct kvec) * nr_vecs, GFP_KERNEL); 2348 sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
2349 if (rdata != NULL) { 2349 if (rdata != NULL) {
2350 kref_init(&rdata->refcount); 2350 kref_init(&rdata->refcount);
2351 INIT_LIST_HEAD(&rdata->list);
2352 init_completion(&rdata->done);
2351 INIT_WORK(&rdata->work, complete); 2353 INIT_WORK(&rdata->work, complete);
2352 INIT_LIST_HEAD(&rdata->pages); 2354 INIT_LIST_HEAD(&rdata->pages);
2353 } 2355 }
@@ -2367,6 +2369,45 @@ cifs_readdata_release(struct kref *refcount)
2367} 2369}
2368 2370
2369static int 2371static int
2372cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
2373{
2374 int rc = 0;
2375 struct page *page, *tpage;
2376 unsigned int i;
2377
2378 for (i = 0; i < npages; i++) {
2379 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2380 if (!page) {
2381 rc = -ENOMEM;
2382 break;
2383 }
2384 list_add(&page->lru, list);
2385 }
2386
2387 if (rc) {
2388 list_for_each_entry_safe(page, tpage, list, lru) {
2389 list_del(&page->lru);
2390 put_page(page);
2391 }
2392 }
2393 return rc;
2394}
2395
2396static void
2397cifs_uncached_readdata_release(struct kref *refcount)
2398{
2399 struct page *page, *tpage;
2400 struct cifs_readdata *rdata = container_of(refcount,
2401 struct cifs_readdata, refcount);
2402
2403 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2404 list_del(&page->lru);
2405 put_page(page);
2406 }
2407 cifs_readdata_release(refcount);
2408}
2409
2410static int
2370cifs_retry_async_readv(struct cifs_readdata *rdata) 2411cifs_retry_async_readv(struct cifs_readdata *rdata)
2371{ 2412{
2372 int rc; 2413 int rc;
@@ -2383,24 +2424,139 @@ cifs_retry_async_readv(struct cifs_readdata *rdata)
2383 return rc; 2424 return rc;
2384} 2425}
2385 2426
2427/**
2428 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2429 * @rdata: the readdata response with list of pages holding data
2430 * @iov: vector in which we should copy the data
2431 * @nr_segs: number of segments in vector
2432 * @offset: offset into file of the first iovec
2433 * @copied: used to return the amount of data copied to the iov
2434 *
2435 * This function copies data from a list of pages in a readdata response into
2436 * an array of iovecs. It will first calculate where the data should go
2437 * based on the info in the readdata and then copy the data into that spot.
2438 */
2439static ssize_t
2440cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2441 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2442{
2443 int rc = 0;
2444 struct iov_iter ii;
2445 size_t pos = rdata->offset - offset;
2446 struct page *page, *tpage;
2447 ssize_t remaining = rdata->bytes;
2448 unsigned char *pdata;
2449
2450 /* set up iov_iter and advance to the correct offset */
2451 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2452 iov_iter_advance(&ii, pos);
2453
2454 *copied = 0;
2455 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2456 ssize_t copy;
2457
2458 /* copy a whole page or whatever's left */
2459 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2460
2461 /* ...but limit it to whatever space is left in the iov */
2462 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2463
2464 /* go while there's data to be copied and no errors */
2465 if (copy && !rc) {
2466 pdata = kmap(page);
2467 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2468 (int)copy);
2469 kunmap(page);
2470 if (!rc) {
2471 *copied += copy;
2472 remaining -= copy;
2473 iov_iter_advance(&ii, copy);
2474 }
2475 }
2476
2477 list_del(&page->lru);
2478 put_page(page);
2479 }
2480
2481 return rc;
2482}
2483
2484static void
2485cifs_uncached_readv_complete(struct work_struct *work)
2486{
2487 struct cifs_readdata *rdata = container_of(work,
2488 struct cifs_readdata, work);
2489
2490 /* if the result is non-zero then the pages weren't kmapped */
2491 if (rdata->result == 0) {
2492 struct page *page;
2493
2494 list_for_each_entry(page, &rdata->pages, lru)
2495 kunmap(page);
2496 }
2497
2498 complete(&rdata->done);
2499 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2500}
2501
2502static int
2503cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
2504 unsigned int remaining)
2505{
2506 int len = 0;
2507 struct page *page, *tpage;
2508
2509 rdata->nr_iov = 1;
2510 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2511 if (remaining >= PAGE_SIZE) {
2512 /* enough data to fill the page */
2513 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2514 rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
2515 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2516 rdata->nr_iov, page->index,
2517 rdata->iov[rdata->nr_iov].iov_base,
2518 rdata->iov[rdata->nr_iov].iov_len);
2519 ++rdata->nr_iov;
2520 len += PAGE_SIZE;
2521 remaining -= PAGE_SIZE;
2522 } else if (remaining > 0) {
2523 /* enough for partial page, fill and zero the rest */
2524 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2525 rdata->iov[rdata->nr_iov].iov_len = remaining;
2526 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2527 rdata->nr_iov, page->index,
2528 rdata->iov[rdata->nr_iov].iov_base,
2529 rdata->iov[rdata->nr_iov].iov_len);
2530 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
2531 '\0', PAGE_SIZE - remaining);
2532 ++rdata->nr_iov;
2533 len += remaining;
2534 remaining = 0;
2535 } else {
2536 /* no need to hold page hostage */
2537 list_del(&page->lru);
2538 put_page(page);
2539 }
2540 }
2541
2542 return len;
2543}
2544
2386static ssize_t 2545static ssize_t
2387cifs_iovec_read(struct file *file, const struct iovec *iov, 2546cifs_iovec_read(struct file *file, const struct iovec *iov,
2388 unsigned long nr_segs, loff_t *poffset) 2547 unsigned long nr_segs, loff_t *poffset)
2389{ 2548{
2390 int rc; 2549 ssize_t rc;
2391 int xid;
2392 ssize_t total_read;
2393 unsigned int bytes_read = 0;
2394 size_t len, cur_len; 2550 size_t len, cur_len;
2395 int iov_offset = 0; 2551 ssize_t total_read = 0;
2552 loff_t offset = *poffset;
2553 unsigned int npages;
2396 struct cifs_sb_info *cifs_sb; 2554 struct cifs_sb_info *cifs_sb;
2397 struct cifs_tcon *pTcon; 2555 struct cifs_tcon *tcon;
2398 struct cifsFileInfo *open_file; 2556 struct cifsFileInfo *open_file;
2399 struct smb_com_read_rsp *pSMBr; 2557 struct cifs_readdata *rdata, *tmp;
2400 struct cifs_io_parms io_parms; 2558 struct list_head rdata_list;
2401 char *read_data; 2559 pid_t pid;
2402 unsigned int rsize;
2403 __u32 pid;
2404 2560
2405 if (!nr_segs) 2561 if (!nr_segs)
2406 return 0; 2562 return 0;
@@ -2409,14 +2565,10 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
2409 if (!len) 2565 if (!len)
2410 return 0; 2566 return 0;
2411 2567
2412 xid = GetXid(); 2568 INIT_LIST_HEAD(&rdata_list);
2413 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2569 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2414
2415 /* FIXME: set up handlers for larger reads and/or convert to async */
2416 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2417
2418 open_file = file->private_data; 2570 open_file = file->private_data;
2419 pTcon = tlink_tcon(open_file->tlink); 2571 tcon = tlink_tcon(open_file->tlink);
2420 2572
2421 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 2573 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2422 pid = open_file->pid; 2574 pid = open_file->pid;
@@ -2426,56 +2578,78 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
2426 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 2578 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2427 cFYI(1, "attempting read on write only file instance"); 2579 cFYI(1, "attempting read on write only file instance");
2428 2580
2429 for (total_read = 0; total_read < len; total_read += bytes_read) { 2581 do {
2430 cur_len = min_t(const size_t, len - total_read, rsize); 2582 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2431 rc = -EAGAIN; 2583 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
2432 read_data = NULL;
2433 2584
2434 while (rc == -EAGAIN) { 2585 /* allocate a readdata struct */
2435 int buf_type = CIFS_NO_BUFFER; 2586 rdata = cifs_readdata_alloc(npages,
2436 if (open_file->invalidHandle) { 2587 cifs_uncached_readv_complete);
2437 rc = cifs_reopen_file(open_file, true); 2588 if (!rdata) {
2438 if (rc != 0) 2589 rc = -ENOMEM;
2439 break; 2590 goto error;
2440 }
2441 io_parms.netfid = open_file->netfid;
2442 io_parms.pid = pid;
2443 io_parms.tcon = pTcon;
2444 io_parms.offset = *poffset;
2445 io_parms.length = cur_len;
2446 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2447 &read_data, &buf_type);
2448 pSMBr = (struct smb_com_read_rsp *)read_data;
2449 if (read_data) {
2450 char *data_offset = read_data + 4 +
2451 le16_to_cpu(pSMBr->DataOffset);
2452 if (memcpy_toiovecend(iov, data_offset,
2453 iov_offset, bytes_read))
2454 rc = -EFAULT;
2455 if (buf_type == CIFS_SMALL_BUFFER)
2456 cifs_small_buf_release(read_data);
2457 else if (buf_type == CIFS_LARGE_BUFFER)
2458 cifs_buf_release(read_data);
2459 read_data = NULL;
2460 iov_offset += bytes_read;
2461 }
2462 } 2591 }
2463 2592
2464 if (rc || (bytes_read == 0)) { 2593 rc = cifs_read_allocate_pages(&rdata->pages, npages);
2465 if (total_read) { 2594 if (rc)
2466 break; 2595 goto error;
2467 } else { 2596
2468 FreeXid(xid); 2597 rdata->cfile = cifsFileInfo_get(open_file);
2469 return rc; 2598 rdata->offset = offset;
2599 rdata->bytes = cur_len;
2600 rdata->pid = pid;
2601 rdata->marshal_iov = cifs_uncached_read_marshal_iov;
2602
2603 rc = cifs_retry_async_readv(rdata);
2604error:
2605 if (rc) {
2606 kref_put(&rdata->refcount,
2607 cifs_uncached_readdata_release);
2608 break;
2609 }
2610
2611 list_add_tail(&rdata->list, &rdata_list);
2612 offset += cur_len;
2613 len -= cur_len;
2614 } while (len > 0);
2615
2616 /* if at least one read request send succeeded, then reset rc */
2617 if (!list_empty(&rdata_list))
2618 rc = 0;
2619
2620 /* the loop below should proceed in the order of increasing offsets */
2621restart_loop:
2622 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2623 if (!rc) {
2624 ssize_t copied;
2625
2626 /* FIXME: freezable sleep too? */
2627 rc = wait_for_completion_killable(&rdata->done);
2628 if (rc)
2629 rc = -EINTR;
2630 else if (rdata->result)
2631 rc = rdata->result;
2632 else {
2633 rc = cifs_readdata_to_iov(rdata, iov,
2634 nr_segs, *poffset,
2635 &copied);
2636 total_read += copied;
2637 }
2638
2639 /* resend call if it's a retryable error */
2640 if (rc == -EAGAIN) {
2641 rc = cifs_retry_async_readv(rdata);
2642 goto restart_loop;
2470 } 2643 }
2471 } else {
2472 cifs_stats_bytes_read(pTcon, bytes_read);
2473 *poffset += bytes_read;
2474 } 2644 }
2645 list_del_init(&rdata->list);
2646 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2475 } 2647 }
2476 2648
2477 FreeXid(xid); 2649 cifs_stats_bytes_read(tcon, total_read);
2478 return total_read; 2650 *poffset += total_read;
2651
2652 return total_read ? total_read : rc;
2479} 2653}
2480 2654
2481ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov, 2655ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,