author		Steve French <smfrench@gmail.com>	2011-10-19 22:22:41 -0400
committer	Steve French <smfrench@gmail.com>	2011-10-19 22:22:41 -0400
commit		fbcae3ea169189ee49dc6820478cb1d069b80a08 (patch)
tree		5e5b35c3bc9c11082697c4190ff96c6a4291ef16 /fs/cifs
parent		71c424bac5679200e272357a225639da8bf94068 (diff)
parent		f06ac72e929115f2772c29727152ba0832d641e4 (diff)
Merge branch 'cifs-3.2' of git://git.samba.org/jlayton/linux into temp-3.2-jeff
Diffstat (limited to 'fs/cifs')
-rw-r--r--	fs/cifs/cifsfs.c	2
-rw-r--r--	fs/cifs/cifsglob.h	29
-rw-r--r--	fs/cifs/cifspdu.h	4
-rw-r--r--	fs/cifs/cifsproto.h	29
-rw-r--r--	fs/cifs/cifssmb.c	364
-rw-r--r--	fs/cifs/connect.c	515
-rw-r--r--	fs/cifs/file.c	295
-rw-r--r--	fs/cifs/transport.c	8
8 files changed, 880 insertions, 366 deletions
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 96a48baad8f7..f219dccbe15a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -74,7 +74,7 @@ module_param(cifs_min_small, int, 0);
 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
 				"Range: 2 to 256");
 unsigned int cifs_max_pending = CIFS_MAX_REQ;
-module_param(cifs_max_pending, int, 0);
+module_param(cifs_max_pending, int, 0444);
 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
 				"Default: 50 Range: 2 to 256");
 unsigned short echo_retries = 5;
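
One note on the cifsfs.c hunk: changing the module_param mode from 0 to 0444 makes the parameter visible, read-only, in sysfs. This is standard module_param behavior, not something added by the patch:

/*
 * With perms of 0444, the kernel exposes the current value read-only at
 * /sys/module/cifs/parameters/cifs_max_pending; with perms of 0 it is
 * settable only at module load time and never shown in sysfs.
 */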
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 55ebf39fb3fd..d153d0b89d39 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -291,7 +291,13 @@ struct TCP_Server_Info {
 	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
 	bool	sec_kerberos;		/* supports plain Kerberos */
 	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
+	bool	large_buf;		/* is current buffer large? */
 	struct delayed_work	echo; /* echo ping workqueue job */
+	struct kvec *iov;	/* reusable kvec array for receives */
+	unsigned int nr_iov;	/* number of kvecs in array */
+	char	*smallbuf;	/* pointer to current "small" buffer */
+	char	*bigbuf;	/* pointer to current "big" buffer */
+	unsigned int total_read; /* total amount of data read in this pass */
 #ifdef CONFIG_CIFS_FSCACHE
 	struct fscache_cookie   *fscache; /* client index cache cookie */
 #endif
@@ -650,8 +656,24 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
 struct mid_q_entry;

 /*
- * This is the prototype for the mid callback function. When creating one,
- * take special care to avoid deadlocks. Things to bear in mind:
+ * This is the prototype for the mid receive function. This function is for
+ * receiving the rest of the SMB frame, starting with the WordCount (which is
+ * just after the MID in struct smb_hdr). Note:
+ *
+ * - This will be called by cifsd, with no locks held.
+ * - The mid will still be on the pending_mid_q.
+ * - mid->resp_buf will point to the current buffer.
+ *
+ * Returns zero on a successful receive, or an error. The receive state in
+ * the TCP_Server_Info will also be updated.
+ */
+typedef int (mid_receive_t)(struct TCP_Server_Info *server,
+			    struct mid_q_entry *mid);
+
+/*
+ * This is the prototype for the mid callback function. This is called once the
+ * mid has been received off of the socket. When creating one, take special
+ * care to avoid deadlocks. Things to bear in mind:
  *
  * - it will be called by cifsd, with no locks held
  * - the mid will be removed from any lists
@@ -669,9 +691,10 @@ struct mid_q_entry {
 	unsigned long when_sent; /* time when smb send finished */
 	unsigned long when_received; /* when demux complete (taken off wire) */
 #endif
+	mid_receive_t *receive; /* call receive callback */
 	mid_callback_t *callback; /* call completion callback */
 	void *callback_data; /* general purpose pointer for callback */
-	struct smb_hdr *resp_buf; /* response buffer */
+	struct smb_hdr *resp_buf; /* pointer to received SMB header */
 	int midState; /* wish this were enum but can not pass to wait_event */
 	__u8 command; /* smb command code */
 	bool largeBuf:1; /* if valid response, is pointer to large buf */
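
The split between mid_receive_t and mid_callback_t above is the core of this series: ->receive runs while cifsd still owns the socket and the frame is partially read, ->callback only after the frame is off the wire. A hypothetical handler pair, illustrative only and not part of the patch, would be shaped like this:

/*
 * Illustrative sketch, not in the patch: cifsd invokes ->receive (if set)
 * to pull the rest of the frame off the socket, then ->callback to
 * complete the request. Neither may block on other SMB requests.
 */
static int example_receive(struct TCP_Server_Info *server,
			   struct mid_q_entry *mid)
{
	/* read the remainder of the frame into caller-owned memory */
	return 0;	/* 0 == frame consumed, mid may be dequeued */
}

static void example_callback(struct mid_q_entry *mid)
{
	/* runs with no locks held; typically just queues deferred work */
}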
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index de3aa285de03..3c6ef34fe2bc 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1089,9 +1089,7 @@ typedef struct smb_com_read_rsp {
 	__le16 DataLengthHigh;
 	__u64 Reserved2;
 	__u16 ByteCount;
-	__u8 Pad;		/* BB check for whether padded to DWORD
-				   boundary and optimum performance here */
-	char Data[1];
+	/* read response data immediately follows */
 } __attribute__((packed)) READ_RSP;

 typedef struct locking_andx_range {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index a1fa9cec05d6..c25d0636cc4f 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -69,8 +69,9 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
 			struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
 extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
-			   unsigned int nvec, mid_callback_t *callback,
-			   void *cbdata, bool ignore_pend);
+			   unsigned int nvec, mid_receive_t *receive,
+			   mid_callback_t *callback, void *cbdata,
+			   bool ignore_pend);
 extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
 			struct smb_hdr * /* input */ ,
 			struct smb_hdr * /* out */ ,
@@ -153,6 +154,12 @@ extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
 extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
 			const char *, int);

+extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
+extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+				 unsigned int to_read);
+extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
+				  struct kvec *iov_orig, unsigned int nr_segs,
+				  unsigned int to_read);
 extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 			       struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);
@@ -442,6 +449,24 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
 extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
 			unsigned char *p24);

+/* asynchronous read support */
+struct cifs_readdata {
+	struct cifsFileInfo		*cfile;
+	struct address_space		*mapping;
+	__u64				offset;
+	unsigned int			bytes;
+	pid_t				pid;
+	int				result;
+	struct list_head		pages;
+	struct work_struct		work;
+	unsigned int			nr_iov;
+	struct kvec			iov[1];
+};
+
+struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
+void cifs_readdata_free(struct cifs_readdata *rdata);
+int cifs_async_readv(struct cifs_readdata *rdata);
+
 /* asynchronous write support */
 struct cifs_writedata {
 	struct kref			refcount;
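
The cifs_readdata declarations above define the async read API that cifssmb.c implements below. A rough sketch of a caller, with field setup taken from the struct above but the function name, reference handling, and error unwinding purely hypothetical (the real consumer is the cifs_readpages rework at the end of this series):

/* Rough sketch only: read one page asynchronously via the new API. */
static int example_async_read_one_page(struct cifsFileInfo *cfile,
				       struct page *page, __u64 offset)
{
	struct cifs_readdata *rdata;

	/* room for one page kvec, in addition to the header kvec iov[0] */
	rdata = cifs_readdata_alloc(1);
	if (rdata == NULL)
		return -ENOMEM;

	cifsFileInfo_get(cfile);		/* rdata owns a reference */
	rdata->cfile = cfile;
	rdata->mapping = page->mapping;
	rdata->offset = offset;
	rdata->bytes = PAGE_CACHE_SIZE;
	rdata->pid = cfile->pid;
	list_add(&page->lru, &rdata->pages);

	/* on success, cifs_readv_receive/_complete consume rdata;
	   unwinding on failure is elided for brevity */
	return cifs_async_readv(rdata);
}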
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index c824c106b2b7..aaad4ce6e6c5 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -33,6 +33,8 @@
 #include <linux/slab.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/task_io_accounting_ops.h>
 #include <asm/uaccess.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -40,6 +42,7 @@
40#include "cifsproto.h" 42#include "cifsproto.h"
41#include "cifs_unicode.h" 43#include "cifs_unicode.h"
42#include "cifs_debug.h" 44#include "cifs_debug.h"
45#include "fscache.h"
43 46
44#ifdef CONFIG_CIFS_POSIX 47#ifdef CONFIG_CIFS_POSIX
45static struct { 48static struct {
@@ -83,6 +86,9 @@ static struct {
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */

+/* Forward declarations */
+static void cifs_readv_complete(struct work_struct *work);
+
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
 static void mark_open_files_invalid(struct cifs_tcon *pTcon)
@@ -737,7 +743,8 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
 	iov.iov_base = smb;
 	iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;

-	rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
+	rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
+			     server, true);
 	if (rc)
 		cFYI(1, "Echo request failed: %d", rc);

@@ -1374,6 +1381,359 @@ openRetry:
 	return rc;
 }

+struct cifs_readdata *
+cifs_readdata_alloc(unsigned int nr_pages)
+{
+	struct cifs_readdata *rdata;
+
+	/* readdata + 1 kvec for each page */
+	rdata = kzalloc(sizeof(*rdata) +
+			sizeof(struct kvec) * nr_pages, GFP_KERNEL);
+	if (rdata != NULL) {
+		INIT_WORK(&rdata->work, cifs_readv_complete);
+		INIT_LIST_HEAD(&rdata->pages);
+	}
+	return rdata;
+}
+
+void
+cifs_readdata_free(struct cifs_readdata *rdata)
+{
+	cifsFileInfo_put(rdata->cfile);
+	kfree(rdata);
+}
+
+/*
+ * Discard any remaining data in the current SMB. To do this, we borrow the
+ * current bigbuf.
+ */
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	READ_RSP *rsp = (READ_RSP *)server->smallbuf;
+	unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
+	int remaining = rfclen + 4 - server->total_read;
+	struct cifs_readdata *rdata = mid->callback_data;
+
+	while (remaining > 0) {
+		int length;
+
+		length = cifs_read_from_socket(server, server->bigbuf,
+				min_t(unsigned int, remaining,
+				    CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
+		if (length < 0)
+			return length;
+		server->total_read += length;
+		remaining -= length;
+	}
+
+	dequeue_mid(mid, rdata->result);
+	return 0;
+}
+
+static int
+cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	int length, len;
+	unsigned int data_offset, remaining, data_len;
+	struct cifs_readdata *rdata = mid->callback_data;
+	READ_RSP *rsp = (READ_RSP *)server->smallbuf;
+	unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
+	u64 eof;
+	pgoff_t eof_index;
+	struct page *page, *tpage;
+
+	cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
+		mid->mid, rdata->offset, rdata->bytes);
+
+	/*
+	 * read the rest of READ_RSP header (sans Data array), or whatever we
+	 * can if there's not enough data. At this point, we've read down to
+	 * the Mid.
+	 */
+	len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
+			sizeof(struct smb_hdr) + 1;
+
+	rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
+	rdata->iov[0].iov_len = len;
+
+	length = cifs_readv_from_socket(server, rdata->iov, 1, len);
+	if (length < 0)
+		return length;
+	server->total_read += length;
+
+	/* Was the SMB read successful? */
+	rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
+	if (rdata->result != 0) {
+		cFYI(1, "%s: server returned error %d", __func__,
+			rdata->result);
+		return cifs_readv_discard(server, mid);
+	}
+
+	/* Is there enough to get to the rest of the READ_RSP header? */
+	if (server->total_read < sizeof(READ_RSP)) {
+		cFYI(1, "%s: server returned short header. got=%u expected=%zu",
+			__func__, server->total_read, sizeof(READ_RSP));
+		rdata->result = -EIO;
+		return cifs_readv_discard(server, mid);
+	}
+
+	data_offset = le16_to_cpu(rsp->DataOffset) + 4;
+	if (data_offset < server->total_read) {
+		/*
+		 * win2k8 sometimes sends an offset of 0 when the read
+		 * is beyond the EOF. Treat it as if the data starts just after
+		 * the header.
+		 */
+		cFYI(1, "%s: data offset (%u) inside read response header",
+			__func__, data_offset);
+		data_offset = server->total_read;
+	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
+		/* data_offset is beyond the end of smallbuf */
+		cFYI(1, "%s: data offset (%u) beyond end of smallbuf",
+			__func__, data_offset);
+		rdata->result = -EIO;
+		return cifs_readv_discard(server, mid);
+	}
+
+	cFYI(1, "%s: total_read=%u data_offset=%u", __func__,
+		server->total_read, data_offset);
+
+	len = data_offset - server->total_read;
+	if (len > 0) {
+		/* read any junk before data into the rest of smallbuf */
+		rdata->iov[0].iov_base = server->smallbuf + server->total_read;
+		rdata->iov[0].iov_len = len;
+		length = cifs_readv_from_socket(server, rdata->iov, 1, len);
+		if (length < 0)
+			return length;
+		server->total_read += length;
+	}
+
+	/* set up first iov for signature check */
+	rdata->iov[0].iov_base = server->smallbuf;
+	rdata->iov[0].iov_len = server->total_read;
+	cFYI(1, "0: iov_base=%p iov_len=%zu",
+		rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+
+	/* how much data is in the response? */
+	data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
+	data_len += le16_to_cpu(rsp->DataLength);
+	if (data_offset + data_len > rfclen) {
+		/* data_len is corrupt -- discard frame */
+		rdata->result = -EIO;
+		return cifs_readv_discard(server, mid);
+	}
+
+	/* marshal up the page array */
+	len = 0;
+	remaining = data_len;
+	rdata->nr_iov = 1;
+
+	/* determine the eof that the server (probably) has */
+	eof = CIFS_I(rdata->mapping->host)->server_eof;
+	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		if (remaining >= PAGE_CACHE_SIZE) {
+			/* enough data to fill the page */
+			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+			rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+				rdata->nr_iov, page->index,
+				rdata->iov[rdata->nr_iov].iov_base,
+				rdata->iov[rdata->nr_iov].iov_len);
+			++rdata->nr_iov;
+			len += PAGE_CACHE_SIZE;
+			remaining -= PAGE_CACHE_SIZE;
+		} else if (remaining > 0) {
+			/* enough for partial page, fill and zero the rest */
+			rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+			rdata->iov[rdata->nr_iov].iov_len = remaining;
+			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+				rdata->nr_iov, page->index,
+				rdata->iov[rdata->nr_iov].iov_base,
+				rdata->iov[rdata->nr_iov].iov_len);
+			memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+				'\0', PAGE_CACHE_SIZE - remaining);
+			++rdata->nr_iov;
+			len += remaining;
+			remaining = 0;
+		} else if (page->index > eof_index) {
+			/*
+			 * The VFS will not try to do readahead past the
+			 * i_size, but it's possible that we have outstanding
+			 * writes with gaps in the middle and the i_size hasn't
+			 * caught up yet. Populate those with zeroed out pages
+			 * to prevent the VFS from repeatedly attempting to
+			 * fill them until the writes are flushed.
+			 */
+			zero_user(page, 0, PAGE_CACHE_SIZE);
+			list_del(&page->lru);
+			lru_cache_add_file(page);
+			flush_dcache_page(page);
+			SetPageUptodate(page);
+			unlock_page(page);
+			page_cache_release(page);
+		} else {
+			/* no need to hold page hostage */
+			list_del(&page->lru);
+			lru_cache_add_file(page);
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+
+	/* issue the read if we have any iovecs left to fill */
+	if (rdata->nr_iov > 1) {
+		length = cifs_readv_from_socket(server, &rdata->iov[1],
+						rdata->nr_iov - 1, len);
+		if (length < 0)
+			return length;
+		server->total_read += length;
+	} else {
+		length = 0;
+	}
+
+	rdata->bytes = length;
+
+	cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
+		rfclen, remaining);
+
+	/* discard anything left over */
+	if (server->total_read < rfclen)
+		return cifs_readv_discard(server, mid);
+
+	dequeue_mid(mid, false);
+	return length;
+}
+
+static void
+cifs_readv_complete(struct work_struct *work)
+{
+	struct cifs_readdata *rdata = container_of(work,
+						struct cifs_readdata, work);
+	struct page *page, *tpage;
+
+	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+		list_del(&page->lru);
+		lru_cache_add_file(page);
+		kunmap(page);
+
+		if (rdata->result == 0) {
+			flush_dcache_page(page);
+			SetPageUptodate(page);
+		}
+
+		unlock_page(page);
+
+		if (rdata->result == 0)
+			cifs_readpage_to_fscache(rdata->mapping->host, page);
+
+		page_cache_release(page);
+	}
+	cifs_readdata_free(rdata);
+}
+
+static void
+cifs_readv_callback(struct mid_q_entry *mid)
+{
+	struct cifs_readdata *rdata = mid->callback_data;
+	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+	struct TCP_Server_Info *server = tcon->ses->server;
+
+	cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
+		mid->mid, mid->midState, rdata->result, rdata->bytes);
+
+	switch (mid->midState) {
+	case MID_RESPONSE_RECEIVED:
+		/* result already set, check signature */
+		if (server->sec_mode &
+		    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+			if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
+					  server, mid->sequence_number + 1))
+				cERROR(1, "Unexpected SMB signature");
+		}
+		/* FIXME: should this be counted toward the initiating task? */
+		task_io_account_read(rdata->bytes);
+		cifs_stats_bytes_read(tcon, rdata->bytes);
+		break;
+	case MID_REQUEST_SUBMITTED:
+	case MID_RETRY_NEEDED:
+		rdata->result = -EAGAIN;
+		break;
+	default:
+		rdata->result = -EIO;
+	}
+
+	queue_work(system_nrt_wq, &rdata->work);
+	DeleteMidQEntry(mid);
+	atomic_dec(&server->inFlight);
+	wake_up(&server->request_q);
+}
+
+/* cifs_async_readv - send an async read, and set up mid to handle result */
+int
+cifs_async_readv(struct cifs_readdata *rdata)
+{
+	int rc;
+	READ_REQ *smb = NULL;
+	int wct;
+	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+
+	cFYI(1, "%s: offset=%llu bytes=%u", __func__,
+		rdata->offset, rdata->bytes);
+
+	if (tcon->ses->capabilities & CAP_LARGE_FILES)
+		wct = 12;
+	else {
+		wct = 10; /* old style read */
+		if ((rdata->offset >> 32) > 0)  {
+			/* can not handle this big offset for old */
+			return -EIO;
+		}
+	}
+
+	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
+	if (rc)
+		return rc;
+
+	smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
+	smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
+
+	smb->AndXCommand = 0xFF;	/* none */
+	smb->Fid = rdata->cfile->netfid;
+	smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
+	if (wct == 12)
+		smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
+	smb->Remaining = 0;
+	smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
+	smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
+	if (wct == 12)
+		smb->ByteCount = 0;
+	else {
+		/* old style read */
+		struct smb_com_readx_req *smbr =
+			(struct smb_com_readx_req *)smb;
+		smbr->ByteCount = 0;
+	}
+
+	/* 4 for RFC1001 length + 1 for BCC */
+	rdata->iov[0].iov_base = smb;
+	rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
+
+	rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
+			     cifs_readv_receive, cifs_readv_callback,
+			     rdata, false);
+
+	if (rc == 0)
+		cifs_stats_inc(&tcon->num_reads);
+
+	cifs_small_buf_release(smb);
+	return rc;
+}
+
 int
 CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
 	    char **buf, int *pbuf_type)
@@ -1834,7 +2194,7 @@ cifs_async_writev(struct cifs_writedata *wdata)

 	kref_get(&wdata->refcount);
 	rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
-			     cifs_writev_callback, wdata, false);
+			     NULL, cifs_writev_callback, wdata, false);

 	if (rc == 0)
 		cifs_stats_inc(&tcon->num_writes);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 97a65af2a08a..f70d87d6ba61 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -320,27 +320,24 @@ requeue_echo:
 }

 static bool
-allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
-		 bool is_large_buf)
+allocate_buffers(struct TCP_Server_Info *server)
 {
-	char *bbuf = *bigbuf, *sbuf = *smallbuf;
-
-	if (bbuf == NULL) {
-		bbuf = (char *)cifs_buf_get();
-		if (!bbuf) {
+	if (!server->bigbuf) {
+		server->bigbuf = (char *)cifs_buf_get();
+		if (!server->bigbuf) {
 			cERROR(1, "No memory for large SMB response");
 			msleep(3000);
 			/* retry will check if exiting */
 			return false;
 		}
-	} else if (is_large_buf) {
+	} else if (server->large_buf) {
 		/* we are reusing a dirty large buf, clear its start */
-		memset(bbuf, 0, size);
+		memset(server->bigbuf, 0, sizeof(struct smb_hdr));
 	}

-	if (sbuf == NULL) {
-		sbuf = (char *)cifs_small_buf_get();
-		if (!sbuf) {
+	if (!server->smallbuf) {
+		server->smallbuf = (char *)cifs_small_buf_get();
+		if (!server->smallbuf) {
 			cERROR(1, "No memory for SMB response");
 			msleep(1000);
 			/* retry will check if exiting */
@@ -349,12 +346,9 @@ allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
 		/* beginning of smb buffer is cleared in our buf_get */
 	} else {
 		/* if existing small buf clear beginning */
-		memset(sbuf, 0, size);
+		memset(server->smallbuf, 0, sizeof(struct smb_hdr));
 	}

-	*bigbuf = bbuf;
-	*smallbuf = sbuf;
-
 	return true;
 }

@@ -375,14 +369,72 @@ server_unresponsive(struct TCP_Server_Info *server)
 	return false;
 }

-static int
-read_from_socket(struct TCP_Server_Info *server, char *buf,
-		 unsigned int to_read)
+/*
+ * kvec_array_init - clone a kvec array, and advance into it
+ * @new: pointer to memory for cloned array
+ * @iov: pointer to original array
+ * @nr_segs: number of members in original array
+ * @bytes: number of bytes to advance into the cloned array
+ *
+ * This function will copy the array provided in iov to a section of memory
+ * and advance the specified number of bytes into the new array. It returns
+ * the number of segments in the new array. "new" must be at least as big as
+ * the original iov array.
+ */
+static unsigned int
+kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
+		size_t bytes)
+{
+	size_t base = 0;
+
+	while (bytes || !iov->iov_len) {
+		int copy = min(bytes, iov->iov_len);
+
+		bytes -= copy;
+		base += copy;
+		if (iov->iov_len == base) {
+			iov++;
+			nr_segs--;
+			base = 0;
+		}
+	}
+	memcpy(new, iov, sizeof(*iov) * nr_segs);
+	new->iov_base += base;
+	new->iov_len -= base;
+	return nr_segs;
+}
+
+static struct kvec *
+get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
+{
+	struct kvec *new_iov;
+
+	if (server->iov && nr_segs <= server->nr_iov)
+		return server->iov;
+
+	/* not big enough -- allocate a new one and release the old */
+	new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
+	if (new_iov) {
+		kfree(server->iov);
+		server->iov = new_iov;
+		server->nr_iov = nr_segs;
+	}
+	return new_iov;
+}
+
+int
+cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
+		       unsigned int nr_segs, unsigned int to_read)
 {
 	int length = 0;
 	int total_read;
+	unsigned int segs;
 	struct msghdr smb_msg;
-	struct kvec iov;
+	struct kvec *iov;
+
+	iov = get_server_iovec(server, nr_segs);
+	if (!iov)
+		return -ENOMEM;

 	smb_msg.msg_control = NULL;
 	smb_msg.msg_controllen = 0;
@@ -393,10 +445,11 @@ read_from_socket(struct TCP_Server_Info *server, char *buf,
 			break;
 		}

-		iov.iov_base = buf + total_read;
-		iov.iov_len = to_read;
-		length = kernel_recvmsg(server->ssocket, &smb_msg, &iov, 1,
-					to_read, 0);
+		segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
+
+		length = kernel_recvmsg(server->ssocket, &smb_msg,
+					iov, segs, to_read, 0);
+
 		if (server->tcpStatus == CifsExiting) {
 			total_read = -ESHUTDOWN;
 			break;
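
kvec_array_init above is what lets a partial recvmsg resume mid-array: the clone is advanced past the bytes already received, so the next kernel_recvmsg picks up exactly where the last one stopped. A worked example, illustrative only and not part of the patch:

/* Example only (not in the patch): how a short read resumes mid-array. */
static void kvec_advance_example(void)
{
	char a[4096], b[4096];
	struct kvec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct kvec clone[2];
	unsigned int segs;

	/* 5000 bytes already received: skip all of a, plus 904 bytes of b */
	segs = kvec_array_init(clone, iov, 2, 5000);
	/* now segs == 1 and clone[0] == { b + 904, 4096 - 904 } */
}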
@@ -426,6 +479,18 @@ read_from_socket(struct TCP_Server_Info *server, char *buf,
 	return total_read;
 }

+int
+cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+		      unsigned int to_read)
+{
+	struct kvec iov;
+
+	iov.iov_base = buf;
+	iov.iov_len = to_read;
+
+	return cifs_readv_from_socket(server, &iov, 1, to_read);
+}
+
 static bool
 is_smb_response(struct TCP_Server_Info *server, unsigned char type)
 {
@@ -471,61 +536,76 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
 }

 static struct mid_q_entry *
-find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf,
-	      int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
+find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
 {
-	struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL;
+	struct mid_q_entry *mid;

 	spin_lock(&GlobalMid_Lock);
-	list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) {
-		if (mid->mid != buf->Mid ||
-		    mid->midState != MID_REQUEST_SUBMITTED ||
-		    mid->command != buf->Command)
-			continue;
-
-		if (*length == 0 && check2ndT2(buf) > 0) {
-			/* We have a multipart transact2 resp */
-			*is_multi_rsp = true;
-			if (mid->resp_buf) {
-				/* merge response - fix up 1st*/
-				*length = coalesce_t2(buf, mid->resp_buf);
-				if (*length > 0) {
-					*length = 0;
-					mid->multiRsp = true;
-					break;
-				}
-				/* All parts received or packet is malformed. */
-				mid->multiEnd = true;
-				goto multi_t2_fnd;
-			}
-			if (!is_large_buf) {
-				/*FIXME: switch to already allocated largebuf?*/
-				cERROR(1, "1st trans2 resp needs bigbuf");
-			} else {
-				/* Have first buffer */
-				mid->resp_buf = buf;
-				mid->largeBuf = true;
-				*bigbuf = NULL;
-			}
-			break;
+	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+		if (mid->mid == buf->Mid &&
+		    mid->midState == MID_REQUEST_SUBMITTED &&
+		    mid->command == buf->Command) {
+			spin_unlock(&GlobalMid_Lock);
+			return mid;
 		}
-		mid->resp_buf = buf;
-		mid->largeBuf = is_large_buf;
-multi_t2_fnd:
-		if (*length == 0)
-			mid->midState = MID_RESPONSE_RECEIVED;
-		else
-			mid->midState = MID_RESPONSE_MALFORMED;
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return NULL;
+}
+
+void
+dequeue_mid(struct mid_q_entry *mid, bool malformed)
+{
 #ifdef CONFIG_CIFS_STATS2
 	mid->when_received = jiffies;
 #endif
-		list_del_init(&mid->qhead);
-		ret = mid;
-		break;
-	}
+	spin_lock(&GlobalMid_Lock);
+	if (!malformed)
+		mid->midState = MID_RESPONSE_RECEIVED;
+	else
+		mid->midState = MID_RESPONSE_MALFORMED;
+	list_del_init(&mid->qhead);
 	spin_unlock(&GlobalMid_Lock);
+}

-	return ret;
+static void
+handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+	   struct smb_hdr *buf, int malformed)
+{
+	if (malformed == 0 && check2ndT2(buf) > 0) {
+		mid->multiRsp = true;
+		if (mid->resp_buf) {
+			/* merge response - fix up 1st*/
+			malformed = coalesce_t2(buf, mid->resp_buf);
+			if (malformed > 0)
+				return;
+
+			/* All parts received or packet is malformed. */
+			mid->multiEnd = true;
+			return dequeue_mid(mid, malformed);
+		}
+		if (!server->large_buf) {
+			/*FIXME: switch to already allocated largebuf?*/
+			cERROR(1, "1st trans2 resp needs bigbuf");
+		} else {
+			/* Have first buffer */
+			mid->resp_buf = buf;
+			mid->largeBuf = true;
+			server->bigbuf = NULL;
+		}
+		return;
+	}
+	mid->resp_buf = buf;
+	mid->largeBuf = server->large_buf;
+	/* Was previous buf put in mpx struct for multi-rsp? */
+	if (!mid->multiRsp) {
+		/* smb buffer will be freed by user thread */
+		if (server->large_buf)
+			server->bigbuf = NULL;
+		else
+			server->smallbuf = NULL;
+	}
+	dequeue_mid(mid, malformed);
 }

 static void clean_demultiplex_info(struct TCP_Server_Info *server)
@@ -615,6 +695,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 	}

 	kfree(server->hostname);
+	kfree(server->iov);
 	kfree(server);

 	length = atomic_dec_return(&tcpSesAllocCount);
@@ -624,17 +705,70 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 }

 static int
+standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	int length;
+	char *buf = server->smallbuf;
+	struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
+	unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
+
+	/* make sure this will fit in a large buffer */
+	if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+		cERROR(1, "SMB response too long (%u bytes)",
+		       pdu_length);
+		cifs_reconnect(server);
+		wake_up(&server->response_q);
+		return -EAGAIN;
+	}
+
+	/* switch to large buffer if too big for a small one */
+	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
+		server->large_buf = true;
+		memcpy(server->bigbuf, server->smallbuf, server->total_read);
+		buf = server->bigbuf;
+		smb_buffer = (struct smb_hdr *)buf;
+	}
+
+	/* now read the rest */
+	length = cifs_read_from_socket(server,
+				       buf + sizeof(struct smb_hdr) - 1,
+				       pdu_length - sizeof(struct smb_hdr) + 1 + 4);
+	if (length < 0)
+		return length;
+	server->total_read += length;
+
+	dump_smb(smb_buffer, server->total_read);
+
+	/*
+	 * We know that we received enough to get to the MID as we
+	 * checked the pdu_length earlier. Now check to see
+	 * if the rest of the header is OK. We borrow the length
+	 * var for the rest of the loop to avoid a new stack var.
+	 *
+	 * 48 bytes is enough to display the header and a little bit
+	 * into the payload for debugging purposes.
+	 */
+	length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
+	if (length != 0)
+		cifs_dump_mem("Bad SMB: ", buf,
+			      min_t(unsigned int, server->total_read, 48));
+
+	if (mid)
+		handle_mid(mid, server, smb_buffer, length);
+
+	return length;
+}
+
+static int
 cifs_demultiplex_thread(void *p)
 {
 	int length;
 	struct TCP_Server_Info *server = p;
-	unsigned int pdu_length, total_read;
-	char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL;
+	unsigned int pdu_length;
+	char *buf = NULL;
 	struct smb_hdr *smb_buffer = NULL;
 	struct task_struct *task_to_wake = NULL;
 	struct mid_q_entry *mid_entry;
-	bool isLargeBuf = false;
-	bool isMultiRsp = false;

 	current->flags |= PF_MEMALLOC;
 	cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
@@ -649,20 +783,18 @@ cifs_demultiplex_thread(void *p)
 		if (try_to_freeze())
 			continue;

-		if (!allocate_buffers(&bigbuf, &smallbuf,
-				      sizeof(struct smb_hdr), isLargeBuf))
+		if (!allocate_buffers(server))
 			continue;

-		isLargeBuf = false;
-		isMultiRsp = false;
-		smb_buffer = (struct smb_hdr *)smallbuf;
-		buf = smallbuf;
+		server->large_buf = false;
+		smb_buffer = (struct smb_hdr *)server->smallbuf;
+		buf = server->smallbuf;
 		pdu_length = 4; /* enough to get RFC1001 header */

-		length = read_from_socket(server, buf, pdu_length);
+		length = cifs_read_from_socket(server, buf, pdu_length);
 		if (length < 0)
 			continue;
-		total_read = length;
+		server->total_read = length;

 		/*
 		 * The right amount was read from socket - 4 bytes,
@@ -674,64 +806,42 @@ cifs_demultiplex_thread(void *p)
 		if (!is_smb_response(server, buf[0]))
 			continue;

-		/* check the length */
-		if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
-		    (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
-			cERROR(1, "Invalid size SMB length %d pdu_length %d",
-			       4, pdu_length + 4);
+		/* make sure we have enough to get to the MID */
+		if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
+			cERROR(1, "SMB response too short (%u bytes)",
+			       pdu_length);
 			cifs_reconnect(server);
 			wake_up(&server->response_q);
 			continue;
 		}

-		/* else length ok */
-		if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
-			isLargeBuf = true;
-			memcpy(bigbuf, smallbuf, 4);
-			smb_buffer = (struct smb_hdr *)bigbuf;
-			buf = bigbuf;
-		}
-
-		length = read_from_socket(server, buf + 4, pdu_length);
+		/* read down to the MID */
+		length = cifs_read_from_socket(server, buf + 4,
+					       sizeof(struct smb_hdr) - 1 - 4);
 		if (length < 0)
 			continue;
-		total_read += length;
+		server->total_read += length;

-		dump_smb(smb_buffer, total_read);
+		mid_entry = find_mid(server, smb_buffer);

-		/*
-		 * We know that we received enough to get to the MID as we
-		 * checked the pdu_length earlier. Now check to see
-		 * if the rest of the header is OK. We borrow the length
-		 * var for the rest of the loop to avoid a new stack var.
-		 *
-		 * 48 bytes is enough to display the header and a little bit
-		 * into the payload for debugging purposes.
-		 */
-		length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
-		if (length != 0)
-			cifs_dump_mem("Bad SMB: ", buf,
-				min_t(unsigned int, total_read, 48));
+		if (!mid_entry || !mid_entry->receive)
+			length = standard_receive3(server, mid_entry);
+		else
+			length = mid_entry->receive(server, mid_entry);

-		server->lstrp = jiffies;
+		if (length < 0)
+			continue;

+		if (server->large_buf) {
+			buf = server->bigbuf;
+			smb_buffer = (struct smb_hdr *)buf;
+		}
+
+		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
-			mid_entry->callback(mid_entry);
-			/* Was previous buf put in mpx struct for multi-rsp? */
-			if (!isMultiRsp) {
-				/* smb buffer will be freed by user thread */
-				if (isLargeBuf)
-					bigbuf = NULL;
-				else
-					smallbuf = NULL;
-			}
-		} else if (length != 0) {
-			/* response sanity checks failed */
-			continue;
-		} else if (!is_valid_oplock_break(smb_buffer, server) &&
-			   !isMultiRsp) {
+			if (!mid_entry->multiRsp || mid_entry->multiEnd)
+				mid_entry->callback(mid_entry);
+		} else if (!is_valid_oplock_break(smb_buffer, server)) {
 			cERROR(1, "No task to wake, unknown frame received! "
 			       "NumMids %d", atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
@@ -745,9 +855,9 @@ cifs_demultiplex_thread(void *p)
 	} /* end while !EXITING */

 	/* buffer usually freed in free_mid - need to free it here on exit */
-	cifs_buf_release(bigbuf);
-	if (smallbuf) /* no sense logging a debug message if NULL */
-		cifs_small_buf_release(smallbuf);
+	cifs_buf_release(server->bigbuf);
+	if (server->smallbuf) /* no sense logging a debug message if NULL */
+		cifs_small_buf_release(server->smallbuf);

 	task_to_wake = xchg(&server->tsk, NULL);
 	clean_demultiplex_info(server);
@@ -2200,16 +2310,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
 	    (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
 		return 0;

-	if (old->rsize != new->rsize)
-		return 0;
-
 	/*
-	 * We want to share sb only if we don't specify wsize or specified wsize
-	 * is greater or equal than existing one.
+	 * We want to share sb only if we don't specify an r/wsize or
+	 * specified r/wsize is greater than or equal to existing one.
 	 */
 	if (new->wsize && new->wsize < old->wsize)
 		return 0;

+	if (new->rsize && new->rsize < old->rsize)
+		return 0;
+
 	if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
 		return 0;

@@ -2647,14 +2757,6 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
 				CIFS_MOUNT_POSIX_PATHS;
 	}

-	if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
-		if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
-			cifs_sb->rsize = 127 * 1024;
-			cFYI(DBG2, "larger reads not supported by srv");
-		}
-	}
-
-
 	cFYI(1, "Negotiate caps 0x%x", (int)cap);
 #ifdef CONFIG_CIFS_DEBUG2
 	if (cap & CIFS_UNIX_FCNTL_CAP)
@@ -2699,27 +2801,11 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
 	cifs_sb->tlink_tree = RB_ROOT;

-	if (pvolume_info->rsize > CIFSMaxBufSize) {
-		cERROR(1, "rsize %d too large, using MaxBufSize",
-			pvolume_info->rsize);
-		cifs_sb->rsize = CIFSMaxBufSize;
-	} else if ((pvolume_info->rsize) &&
-			(pvolume_info->rsize <= CIFSMaxBufSize))
-		cifs_sb->rsize = pvolume_info->rsize;
-	else /* default */
-		cifs_sb->rsize = CIFSMaxBufSize;
-
-	if (cifs_sb->rsize < 2048) {
-		cifs_sb->rsize = 2048;
-		/* Windows ME may prefer this */
-		cFYI(1, "readsize set to minimum: 2048");
-	}
-
 	/*
-	 * Temporarily set wsize for matching superblock. If we end up using
-	 * new sb then cifs_negotiate_wsize will later negotiate it downward
-	 * if needed.
+	 * Temporarily set r/wsize for matching superblock. If we end up using
+	 * new sb then client will later negotiate it downward if needed.
 	 */
+	cifs_sb->rsize = pvolume_info->rsize;
 	cifs_sb->wsize = pvolume_info->wsize;

 	cifs_sb->mnt_uid = pvolume_info->linux_uid;
@@ -2794,29 +2880,41 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 }

 /*
- * When the server supports very large writes via POSIX extensions, we can
- * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
- * the RFC1001 length.
+ * When the server supports very large reads and writes via POSIX extensions,
+ * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
+ * including the RFC1001 length.
  *
  * Note that this might make for "interesting" allocation problems during
  * writeback however as we have to allocate an array of pointers for the
  * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ *
+ * For reads, there is a similar problem as we need to allocate an array
+ * of kvecs to handle the receive, though that should only need to be done
+ * once.
  */
 #define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
+#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)

 /*
- * When the server doesn't allow large posix writes, only allow a wsize of
- * 2^17-1 minus the size of the WRITE_AND_X header. That allows for a write up
- * to the maximum size described by RFC1002.
+ * When the server doesn't allow large posix writes, only allow a rsize/wsize
+ * of 2^17-1 minus the size of the call header. That allows for a read or
+ * write up to the maximum size described by RFC1002.
  */
 #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
+#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)

 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
  * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
  * a single wsize request with a single call.
  */
-#define CIFS_DEFAULT_WSIZE (1024 * 1024)
+#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+
+/*
+ * Windows only supports a max of 60k reads. Default to that when posix
+ * extensions aren't in force.
+ */
+#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)

 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
@@ -2824,7 +2922,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
 	struct TCP_Server_Info *server = tcon->ses->server;
 	unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
-				CIFS_DEFAULT_WSIZE;
+				CIFS_DEFAULT_IOSIZE;

 	/* can server support 24-bit write sizes? (via UNIX extensions) */
 	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
@@ -2847,6 +2945,50 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 	return wsize;
 }

+static unsigned int
+cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+{
+	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+	struct TCP_Server_Info *server = tcon->ses->server;
+	unsigned int rsize, defsize;
+
+	/*
+	 * Set default value...
+	 *
+	 * HACK alert! Ancient servers have very small buffers. Even though
+	 * MS-CIFS indicates that servers are only limited by the client's
+	 * bufsize for reads, testing against win98se shows that it throws
+	 * INVALID_PARAMETER errors if you try to request too large a read.
+	 *
+	 * If the server advertises a MaxBufferSize of less than one page,
+	 * assume that it also can't satisfy reads larger than that either.
+	 *
+	 * FIXME: Is there a better heuristic for this?
+	 */
+	if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
+		defsize = CIFS_DEFAULT_IOSIZE;
+	else if (server->capabilities & CAP_LARGE_READ_X)
+		defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
+	else if (server->maxBuf >= PAGE_CACHE_SIZE)
+		defsize = CIFSMaxBufSize;
+	else
+		defsize = server->maxBuf - sizeof(READ_RSP);
+
+	rsize = pvolume_info->rsize ? pvolume_info->rsize : defsize;
+
+	/*
+	 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
+	 * the client's MaxBufferSize.
+	 */
+	if (!(server->capabilities & CAP_LARGE_READ_X))
+		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+
+	/* hard limit of CIFS_MAX_RSIZE */
+	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
+
+	return rsize;
+}
+
 static int
 is_path_accessible(int xid, struct cifs_tcon *tcon,
 		   struct cifs_sb_info *cifs_sb, const char *full_path)
@@ -3040,6 +3182,22 @@ cifs_get_volume_info(char *mount_data, const char *devname)
 	return volume_info;
 }

+/* make sure ra_pages is a multiple of rsize */
+static inline unsigned int
+cifs_ra_pages(struct cifs_sb_info *cifs_sb)
+{
+	unsigned int reads;
+	unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
+
+	if (rsize_pages >= default_backing_dev_info.ra_pages)
+		return default_backing_dev_info.ra_pages;
+	else if (rsize_pages == 0)
+		return rsize_pages;
+
+	reads = default_backing_dev_info.ra_pages / rsize_pages;
+	return reads * rsize_pages;
+}
+
 int
 cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 {
@@ -3058,8 +3216,6 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 	if (rc)
 		return rc;

-	cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
-
 #ifdef CONFIG_CIFS_DFS_UPCALL
 try_mount_again:
 	/* cleanup activities if we're chasing a referral */
@@ -3124,14 +3280,11 @@ try_mount_again:
 		CIFSSMBQFSAttributeInfo(xid, tcon);
 	}

-	if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
-		cifs_sb->rsize = 1024 * 127;
-		cFYI(DBG2, "no very large read support, rsize now 127K");
-	}
-	if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
-		cifs_sb->rsize = min(cifs_sb->rsize, CIFSMaxBufSize);
-
 	cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
+	cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
+
+	/* tune readahead according to rsize */
+	cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb);

 remote_path_check:
 #ifdef CONFIG_CIFS_DFS_UPCALL
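
cifs_ra_pages above rounds the readahead window down to a whole number of rsize-sized reads, so readahead batches never end in a short request. A worked example of the arithmetic, assuming 4k pages and the stock 32-page (128k) default window:

/*
 * Worked example (not in the patch), assuming PAGE_CACHE_SIZE == 4096 and
 * default_backing_dev_info.ra_pages == 32:
 *
 *   rsize = 61440 (60k)  ->  rsize_pages = 61440 / 4096 = 15
 *   15 < 32              ->  reads = 32 / 15 = 2
 *   result: 2 * 15 = 30 pages (120k), a whole number of 60k reads
 *
 * With rsize >= 128k the full default window is returned unchanged.
 */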
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 852d1f39adae..a3b545ff5250 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/swap.h>
 #include <asm/div64.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -1757,6 +1758,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 	struct smb_com_read_rsp *pSMBr;
 	struct cifs_io_parms io_parms;
 	char *read_data;
+	unsigned int rsize;
 	__u32 pid;

 	if (!nr_segs)
@@ -1769,6 +1771,9 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 	xid = GetXid();
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

+	/* FIXME: set up handlers for larger reads and/or convert to async */
+	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
+
 	open_file = file->private_data;
 	pTcon = tlink_tcon(open_file->tlink);

@@ -1781,7 +1786,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
1781 cFYI(1, "attempting read on write only file instance"); 1786 cFYI(1, "attempting read on write only file instance");
1782 1787
1783 for (total_read = 0; total_read < len; total_read += bytes_read) { 1788 for (total_read = 0; total_read < len; total_read += bytes_read) {
1784 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); 1789 cur_len = min_t(const size_t, len - total_read, rsize);
1785 rc = -EAGAIN; 1790 rc = -EAGAIN;
1786 read_data = NULL; 1791 read_data = NULL;
1787 1792
@@ -1873,6 +1878,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
 	unsigned int bytes_read = 0;
 	unsigned int total_read;
 	unsigned int current_read_size;
+	unsigned int rsize;
 	struct cifs_sb_info *cifs_sb;
 	struct cifs_tcon *pTcon;
 	int xid;
@@ -1885,6 +1891,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
 	xid = GetXid();
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

+	/* FIXME: set up handlers for larger reads and/or convert to async */
+	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
+
 	if (file->private_data == NULL) {
 		rc = -EBADF;
 		FreeXid(xid);
@@ -1904,8 +1913,8 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
 	for (total_read = 0, current_offset = read_data;
 	     read_size > total_read;
 	     total_read += bytes_read, current_offset += bytes_read) {
-		current_read_size = min_t(uint, read_size - total_read,
-					  cifs_sb->rsize);
+		current_read_size = min_t(uint, read_size - total_read, rsize);
+
 		/* For windows me and 9x we do not want to request more
 		than it negotiated since it will refuse the read then */
 		if ((pTcon->ses) &&
@@ -2000,82 +2009,24 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	return rc;
 }

-
-static void cifs_copy_cache_pages(struct address_space *mapping,
-	struct list_head *pages, int bytes_read, char *data)
-{
-	struct page *page;
-	char *target;
-
-	while (bytes_read > 0) {
-		if (list_empty(pages))
-			break;
-
-		page = list_entry(pages->prev, struct page, lru);
-		list_del(&page->lru);
-
-		if (add_to_page_cache_lru(page, mapping, page->index,
-					  GFP_KERNEL)) {
-			page_cache_release(page);
-			cFYI(1, "Add page cache failed");
-			data += PAGE_CACHE_SIZE;
-			bytes_read -= PAGE_CACHE_SIZE;
-			continue;
-		}
-		page_cache_release(page);
-
-		target = kmap_atomic(page, KM_USER0);
-
-		if (PAGE_CACHE_SIZE > bytes_read) {
-			memcpy(target, data, bytes_read);
-			/* zero the tail end of this partial page */
-			memset(target + bytes_read, 0,
-			       PAGE_CACHE_SIZE - bytes_read);
-			bytes_read = 0;
-		} else {
-			memcpy(target, data, PAGE_CACHE_SIZE);
-			bytes_read -= PAGE_CACHE_SIZE;
-		}
-		kunmap_atomic(target, KM_USER0);
-
-		flush_dcache_page(page);
-		SetPageUptodate(page);
-		unlock_page(page);
-		data += PAGE_CACHE_SIZE;
-
-		/* add page to FS-Cache */
-		cifs_readpage_to_fscache(mapping->host, page);
-	}
-	return;
-}
-
 static int cifs_readpages(struct file *file, struct address_space *mapping,
 	struct list_head *page_list, unsigned num_pages)
 {
-	int rc = -EACCES;
-	int xid;
-	loff_t offset;
-	struct page *page;
-	struct cifs_sb_info *cifs_sb;
-	struct cifs_tcon *pTcon;
-	unsigned int bytes_read = 0;
-	unsigned int read_size, i;
-	char *smb_read_data = NULL;
-	struct smb_com_read_rsp *pSMBr;
-	struct cifsFileInfo *open_file;
-	struct cifs_io_parms io_parms;
-	int buf_type = CIFS_NO_BUFFER;
-	__u32 pid;
+	int rc;
+	struct list_head tmplist;
+	struct cifsFileInfo *open_file = file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+	unsigned int rsize = cifs_sb->rsize;
+	pid_t pid;

-	xid = GetXid();
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
-	open_file = file->private_data;
-	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-	pTcon = tlink_tcon(open_file->tlink);
+	/*
+	 * Give up immediately if rsize is too small to read an entire page.
+	 * The VFS will fall back to readpage. We should never reach this
+	 * point however since we set ra_pages to 0 when the rsize is smaller
+	 * than a cache page.
+	 */
+	if (unlikely(rsize < PAGE_CACHE_SIZE))
+		return 0;

 	/*
 	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
@@ -2084,125 +2035,127 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
 					 &num_pages);
 	if (rc == 0)
-		goto read_complete;
+		return rc;
 
-	cFYI(DBG2, "rpages: num pages %d", num_pages);
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
 	else
 		pid = current->tgid;
 
-	for (i = 0; i < num_pages; ) {
-		unsigned contig_pages;
-		struct page *tmp_page;
-		unsigned long expected_index;
+	rc = 0;
+	INIT_LIST_HEAD(&tmplist);
+
+	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
+		mapping, num_pages);
 
-		if (list_empty(page_list))
-			break;
+	/*
+	 * Start with the page at end of list and move it to private
+	 * list. Do the same with any following pages until we hit
+	 * the rsize limit, hit an index discontinuity, or run out of
+	 * pages. Issue the async read and then start the loop again
+	 * until the list is empty.
+	 *
+	 * Note that list order is important. The page_list is in
+	 * the order of declining indexes. When we put the pages in
+	 * the rdata->pages, then we want them in increasing order.
+	 */
+	while (!list_empty(page_list)) {
+		unsigned int bytes = PAGE_CACHE_SIZE;
+		unsigned int expected_index;
+		unsigned int nr_pages = 1;
+		loff_t offset;
+		struct page *page, *tpage;
+		struct cifs_readdata *rdata;
 
 		page = list_entry(page_list->prev, struct page, lru);
+
+		/*
+		 * Lock the page and put it in the cache. Since no one else
+		 * should have access to this page, we're safe to simply set
+		 * PG_locked without checking it first.
+		 */
+		__set_page_locked(page);
+		rc = add_to_page_cache_locked(page, mapping,
+					      page->index, GFP_KERNEL);
+
+		/* give up if we can't stick it in the cache */
+		if (rc) {
+			__clear_page_locked(page);
+			break;
+		}
+
+		/* move first page to the tmplist */
 		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+		list_move_tail(&page->lru, &tmplist);
 
-		/* count adjacent pages that we will read into */
-		contig_pages = 0;
-		expected_index =
-			list_entry(page_list->prev, struct page, lru)->index;
-		list_for_each_entry_reverse(tmp_page, page_list, lru) {
-			if (tmp_page->index == expected_index) {
-				contig_pages++;
-				expected_index++;
-			} else
+		/* now try and add more pages onto the request */
+		expected_index = page->index + 1;
+		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
+			/* discontinuity ? */
+			if (page->index != expected_index)
 				break;
+
+			/* would this page push the read over the rsize? */
+			if (bytes + PAGE_CACHE_SIZE > rsize)
+				break;
+
+			__set_page_locked(page);
+			if (add_to_page_cache_locked(page, mapping,
+						page->index, GFP_KERNEL)) {
+				__clear_page_locked(page);
+				break;
+			}
+			list_move_tail(&page->lru, &tmplist);
+			bytes += PAGE_CACHE_SIZE;
+			expected_index++;
+			nr_pages++;
 		}
-		if (contig_pages + i > num_pages)
-			contig_pages = num_pages - i;
-
-		/* for reads over a certain size could initiate async
-		   read ahead */
-
-		read_size = contig_pages * PAGE_CACHE_SIZE;
-		/* Read size needs to be in multiples of one page */
-		read_size = min_t(const unsigned int, read_size,
-				  cifs_sb->rsize & PAGE_CACHE_MASK);
-		cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d",
-				read_size, contig_pages);
-		rc = -EAGAIN;
-		while (rc == -EAGAIN) {
+
+		rdata = cifs_readdata_alloc(nr_pages);
+		if (!rdata) {
+			/* best to give up if we're out of mem */
+			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
+				list_del(&page->lru);
+				lru_cache_add_file(page);
+				unlock_page(page);
+				page_cache_release(page);
+			}
+			rc = -ENOMEM;
+			break;
+		}
+
+		spin_lock(&cifs_file_list_lock);
+		cifsFileInfo_get(open_file);
+		spin_unlock(&cifs_file_list_lock);
+		rdata->cfile = open_file;
+		rdata->mapping = mapping;
+		rdata->offset = offset;
+		rdata->bytes = bytes;
+		rdata->pid = pid;
+		list_splice_init(&tmplist, &rdata->pages);
+
+		do {
 			if (open_file->invalidHandle) {
 				rc = cifs_reopen_file(open_file, true);
 				if (rc != 0)
-					break;
+					continue;
 			}
-			io_parms.netfid = open_file->netfid;
-			io_parms.pid = pid;
-			io_parms.tcon = pTcon;
-			io_parms.offset = offset;
-			io_parms.length = read_size;
-			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
-					 &smb_read_data, &buf_type);
-			/* BB more RC checks ? */
-			if (rc == -EAGAIN) {
-				if (smb_read_data) {
-					if (buf_type == CIFS_SMALL_BUFFER)
-						cifs_small_buf_release(smb_read_data);
-					else if (buf_type == CIFS_LARGE_BUFFER)
-						cifs_buf_release(smb_read_data);
-					smb_read_data = NULL;
-				}
-			}
-		}
-		if ((rc < 0) || (smb_read_data == NULL)) {
-			cFYI(1, "Read error in readpages: %d", rc);
-			break;
-		} else if (bytes_read > 0) {
-			task_io_account_read(bytes_read);
-			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
-			cifs_copy_cache_pages(mapping, page_list, bytes_read,
-				smb_read_data + 4 /* RFC1001 hdr */ +
-				le16_to_cpu(pSMBr->DataOffset));
-
-			i += bytes_read >> PAGE_CACHE_SHIFT;
-			cifs_stats_bytes_read(pTcon, bytes_read);
-			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
-				i++; /* account for partial page */
-
-				/* server copy of file can have smaller size
-				   than client */
-				/* BB do we need to verify this common case ?
-				   this case is ok - if we are at server EOF
-				   we will hit it on next read */
+			rc = cifs_async_readv(rdata);
+		} while (rc == -EAGAIN);
 
-				/* break; */
+		if (rc != 0) {
+			list_for_each_entry_safe(page, tpage, &rdata->pages,
+						 lru) {
+				list_del(&page->lru);
+				lru_cache_add_file(page);
+				unlock_page(page);
+				page_cache_release(page);
 			}
-		} else {
-			cFYI(1, "No bytes read (%d) at offset %lld . "
-				"Cleaning remaining pages from readahead list",
-				bytes_read, offset);
-			/* BB turn off caching and do new lookup on
-			   file size at server? */
+			cifs_readdata_free(rdata);
 			break;
 		}
-		if (smb_read_data) {
-			if (buf_type == CIFS_SMALL_BUFFER)
-				cifs_small_buf_release(smb_read_data);
-			else if (buf_type == CIFS_LARGE_BUFFER)
-				cifs_buf_release(smb_read_data);
-			smb_read_data = NULL;
-		}
-		bytes_read = 0;
 	}
 
-/* need to free smb_read_data buf before exit */
-	if (smb_read_data) {
-		if (buf_type == CIFS_SMALL_BUFFER)
-			cifs_small_buf_release(smb_read_data);
-		else if (buf_type == CIFS_LARGE_BUFFER)
-			cifs_buf_release(smb_read_data);
-		smb_read_data = NULL;
-	}
-
-read_complete:
-	FreeXid(xid);
 	return rc;
 }
 
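With cifs_copy_cache_pages() gone, cifs_readpages() no longer issues one synchronous CIFSSMBRead() per run of pages and memcpy()s the reply into the page cache. It now batches pages and hands each batch to cifs_async_readv(), whose receive side reads data straight off the socket into the pages. The batching rule: take the page with the lowest index from the tail of page_list (the VFS supplies the list in declining index order), lock it into the page cache, then keep appending pages while indexes stay contiguous and one more page would not push the batch past rsize; on failure the batched pages go back to the file LRU and are released. Below is a self-contained sketch of just that batching decision; bare indexes stand in for struct page, and PAGE_SZ/RSIZE are illustrative constants, not the kernel's.

/* Sketch only: demonstrates the contiguity + rsize batching rule. */
#include <stdio.h>

#define PAGE_SZ 4096u
#define RSIZE   16384u		/* stand-in for the mount's rsize */

int main(void)
{
	/* page_list analogue: indexes in declining order, with a gap at 5 */
	unsigned int pages[] = { 9, 8, 7, 6, 4, 3 };
	int n = sizeof(pages) / sizeof(pages[0]);
	int i = n - 1;		/* start at the tail, i.e. the lowest index */

	while (i >= 0) {
		unsigned int first = pages[i];
		unsigned int bytes = PAGE_SZ;
		unsigned int expected = first + 1;
		int nr = 1;

		i--;
		/* extend the batch while pages stay contiguous and the
		   next page would not push the read past RSIZE */
		while (i >= 0 && pages[i] == expected &&
		       bytes + PAGE_SZ <= RSIZE) {
			bytes += PAGE_SZ;
			expected++;
			nr++;
			i--;
		}
		printf("async read: offset=%u bytes=%u pages=%d\n",
		       first * PAGE_SZ, bytes, nr);
	}
	return 0;
}

The kernel code additionally handles page-cache insertion failures and the rdata allocation and refcounting; the sketch keeps only the offset/length math.
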
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 33a3fbf3a3a5..0cc9584f5889 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -26,6 +26,7 @@
 #include <linux/wait.h>
 #include <linux/net.h>
 #include <linux/delay.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <linux/mempool.h>
@@ -324,7 +325,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
 	int error;
 
-	error = wait_event_killable(server->response_q,
+	error = wait_event_freezekillable(server->response_q,
 				    midQ->midState != MID_REQUEST_SUBMITTED);
 	if (error < 0)
 		return -ERESTARTSYS;
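
wait_for_response() now sleeps in wait_event_freezekillable() rather than wait_event_killable(), so a task blocked waiting for a CIFS reply can be frozen for suspend/hibernate instead of stalling the freezer until the server answers; fatal signals still interrupt the wait. The macro comes from <linux/freezer.h> (hence the new include above) and is introduced by the companion freezer patch in this series. Roughly, it tells the freezer not to count the sleeper while a killable wait is in progress, along these lines:

/* Rough shape of the helper as added by the companion patch; see
 * include/linux/freezer.h in this series for the authoritative version. */
#define wait_event_freezekillable(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_killable(wq, condition);		\
	freezer_count();					\
	__retval;						\
})
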
@@ -339,8 +340,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
  */
 int
 cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
-		unsigned int nvec, mid_callback_t *callback, void *cbdata,
-		bool ignore_pend)
+		unsigned int nvec, mid_receive_t *receive,
+		mid_callback_t *callback, void *cbdata, bool ignore_pend)
 {
 	int rc;
 	struct mid_q_entry *mid;
@@ -374,6 +375,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
 		goto out_err;
 	}
 
+	mid->receive = receive;
 	mid->callback = callback;
 	mid->callback_data = cbdata;
 	mid->midState = MID_REQUEST_SUBMITTED;
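
cifs_call_async() now threads a mid_receive_t through to the mid alongside the existing mid_callback_t: the receive function runs in the demultiplex (cifsd) thread and pulls the rest of the SMB frame off the socket, for async reads straight into the page-cache pages batched above, while the callback runs afterwards to complete the request. A sketch of how a caller wires the pair up; the wrapper below is hypothetical, though cifs_readv_receive/cifs_readv_callback match the names the async read path in cifssmb.c passes to cifs_call_async().

/* Hypothetical wrapper; only cifs_call_async()'s signature is from the patch. */
static int issue_async_read(struct TCP_Server_Info *server,
			    struct cifs_readdata *rdata)
{
	/* receive: runs in cifsd and copies the response body off the socket;
	 * callback: runs later to complete the pages and drop the rdata ref */
	return cifs_call_async(server, rdata->iov, 1,
			       cifs_readv_receive, cifs_readv_callback,
			       rdata, false);
}
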