Diffstat (limited to 'fs/cifs/connect.c')
-rw-r--r--  fs/cifs/connect.c | 515
1 file changed, 334 insertions(+), 181 deletions(-)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 97a65af2a08..f70d87d6ba6 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -320,27 +320,24 @@ requeue_echo:
 }
 
 static bool
-allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
-		 bool is_large_buf)
+allocate_buffers(struct TCP_Server_Info *server)
 {
-	char *bbuf = *bigbuf, *sbuf = *smallbuf;
-
-	if (bbuf == NULL) {
-		bbuf = (char *)cifs_buf_get();
-		if (!bbuf) {
+	if (!server->bigbuf) {
+		server->bigbuf = (char *)cifs_buf_get();
+		if (!server->bigbuf) {
 			cERROR(1, "No memory for large SMB response");
 			msleep(3000);
 			/* retry will check if exiting */
 			return false;
 		}
-	} else if (is_large_buf) {
+	} else if (server->large_buf) {
 		/* we are reusing a dirty large buf, clear its start */
-		memset(bbuf, 0, size);
+		memset(server->bigbuf, 0, sizeof(struct smb_hdr));
 	}
 
-	if (sbuf == NULL) {
-		sbuf = (char *)cifs_small_buf_get();
-		if (!sbuf) {
+	if (!server->smallbuf) {
+		server->smallbuf = (char *)cifs_small_buf_get();
+		if (!server->smallbuf) {
 			cERROR(1, "No memory for SMB response");
 			msleep(1000);
 			/* retry will check if exiting */
@@ -349,12 +346,9 @@ allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
 		/* beginning of smb buffer is cleared in our buf_get */
 	} else {
 		/* if existing small buf clear beginning */
-		memset(sbuf, 0, size);
+		memset(server->smallbuf, 0, sizeof(struct smb_hdr));
 	}
 
-	*bigbuf = bbuf;
-	*smallbuf = sbuf;
-
 	return true;
 }
 
@@ -375,14 +369,72 @@ server_unresponsive(struct TCP_Server_Info *server)
 	return false;
 }
 
-static int
-read_from_socket(struct TCP_Server_Info *server, char *buf,
-		 unsigned int to_read)
+/*
+ * kvec_array_init - clone a kvec array, and advance into it
+ * @new: pointer to memory for cloned array
+ * @iov: pointer to original array
+ * @nr_segs: number of members in original array
+ * @bytes: number of bytes to advance into the cloned array
+ *
+ * This function will copy the array provided in iov to a section of memory
+ * and advance the specified number of bytes into the new array. It returns
+ * the number of segments in the new array. "new" must be at least as big as
+ * the original iov array.
+ */
+static unsigned int
+kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
+		size_t bytes)
+{
+	size_t base = 0;
+
+	while (bytes || !iov->iov_len) {
+		int copy = min(bytes, iov->iov_len);
+
+		bytes -= copy;
+		base += copy;
+		if (iov->iov_len == base) {
+			iov++;
+			nr_segs--;
+			base = 0;
+		}
+	}
+	memcpy(new, iov, sizeof(*iov) * nr_segs);
+	new->iov_base += base;
+	new->iov_len -= base;
+	return nr_segs;
+}
+
+static struct kvec *
+get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
+{
+	struct kvec *new_iov;
+
+	if (server->iov && nr_segs <= server->nr_iov)
+		return server->iov;
+
+	/* not big enough -- allocate a new one and release the old */
+	new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
+	if (new_iov) {
+		kfree(server->iov);
+		server->iov = new_iov;
+		server->nr_iov = nr_segs;
+	}
+	return new_iov;
+}
+
+int
+cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
+		       unsigned int nr_segs, unsigned int to_read)
 {
 	int length = 0;
 	int total_read;
+	unsigned int segs;
 	struct msghdr smb_msg;
-	struct kvec iov;
+	struct kvec *iov;
+
+	iov = get_server_iovec(server, nr_segs);
+	if (!iov)
+		return -ENOMEM;
 
 	smb_msg.msg_control = NULL;
 	smb_msg.msg_controllen = 0;
@@ -393,10 +445,11 @@ read_from_socket(struct TCP_Server_Info *server, char *buf,
 			break;
 		}
 
-		iov.iov_base = buf + total_read;
-		iov.iov_len = to_read;
-		length = kernel_recvmsg(server->ssocket, &smb_msg, &iov, 1,
-					to_read, 0);
+		segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
+
+		length = kernel_recvmsg(server->ssocket, &smb_msg,
+					iov, segs, to_read, 0);
+
 		if (server->tcpStatus == CifsExiting) {
 			total_read = -ESHUTDOWN;
 			break;
@@ -426,6 +479,18 @@ read_from_socket(struct TCP_Server_Info *server, char *buf,
 	return total_read;
 }
 
+int
+cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+		      unsigned int to_read)
+{
+	struct kvec iov;
+
+	iov.iov_base = buf;
+	iov.iov_len = to_read;
+
+	return cifs_readv_from_socket(server, &iov, 1, to_read);
+}
+
 static bool
 is_smb_response(struct TCP_Server_Info *server, unsigned char type)
 {
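
The two helpers above replace the old single-buffer read_from_socket(): callers now describe the receive target as a kvec array, and on a short read cifs_readv_from_socket() resumes by advancing a scratch copy of that array rather than rebuilding it. Below is a minimal userspace sketch of that advance step, with struct kvec mocked and the kernel's min() replaced by a ternary; it illustrates the same logic but is not kernel code.

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    /* userspace stand-in for the kernel's struct kvec */
    struct kvec {
            void *iov_base;
            size_t iov_len;
    };

    /* same advance logic as kvec_array_init() in the patch */
    static unsigned int
    kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
                    size_t bytes)
    {
            size_t base = 0;

            while (bytes || !iov->iov_len) {
                    size_t copy = bytes < iov->iov_len ? bytes : iov->iov_len;

                    bytes -= copy;
                    base += copy;
                    if (iov->iov_len == base) {
                            iov++;
                            nr_segs--;
                            base = 0;
                    }
            }
            memcpy(new, iov, sizeof(*iov) * nr_segs);
            new->iov_base = (char *)new->iov_base + base;
            new->iov_len -= base;
            return nr_segs;
    }

    int main(void)
    {
            char a[8], b[16];
            struct kvec orig[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
            struct kvec resume[2];

            /* pretend the first recvmsg returned only 10 of the 24 bytes */
            unsigned int segs = kvec_array_init(resume, orig, 2, 10);

            /* one segment left, starting 2 bytes into b, 14 bytes to go */
            printf("segs=%u len=%zu offset=%td\n", segs, resume[0].iov_len,
                   (char *)resume[0].iov_base - b);
            return 0;
    }

Compiled with a plain C compiler this prints segs=1 len=14 offset=2: after a partial 10-byte receive into an 8-byte plus 16-byte pair of buffers, the next kernel_recvmsg() call starts 2 bytes into the second buffer.
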
@@ -471,61 +536,76 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
 }
 
 static struct mid_q_entry *
-find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf,
-	      int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
+find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
 {
-	struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL;
+	struct mid_q_entry *mid;
 
 	spin_lock(&GlobalMid_Lock);
-	list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) {
-		if (mid->mid != buf->Mid ||
-		    mid->midState != MID_REQUEST_SUBMITTED ||
-		    mid->command != buf->Command)
-			continue;
-
-		if (*length == 0 && check2ndT2(buf) > 0) {
-			/* We have a multipart transact2 resp */
-			*is_multi_rsp = true;
-			if (mid->resp_buf) {
-				/* merge response - fix up 1st*/
-				*length = coalesce_t2(buf, mid->resp_buf);
-				if (*length > 0) {
-					*length = 0;
-					mid->multiRsp = true;
-					break;
-				}
-				/* All parts received or packet is malformed. */
-				mid->multiEnd = true;
-				goto multi_t2_fnd;
-			}
-			if (!is_large_buf) {
-				/*FIXME: switch to already allocated largebuf?*/
-				cERROR(1, "1st trans2 resp needs bigbuf");
-			} else {
-				/* Have first buffer */
-				mid->resp_buf = buf;
-				mid->largeBuf = true;
-				*bigbuf = NULL;
-			}
-			break;
+	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+		if (mid->mid == buf->Mid &&
+		    mid->midState == MID_REQUEST_SUBMITTED &&
+		    mid->command == buf->Command) {
+			spin_unlock(&GlobalMid_Lock);
+			return mid;
 		}
-		mid->resp_buf = buf;
-		mid->largeBuf = is_large_buf;
-multi_t2_fnd:
-		if (*length == 0)
-			mid->midState = MID_RESPONSE_RECEIVED;
-		else
-			mid->midState = MID_RESPONSE_MALFORMED;
+	}
+	spin_unlock(&GlobalMid_Lock);
+	return NULL;
+}
+
+void
+dequeue_mid(struct mid_q_entry *mid, bool malformed)
+{
 #ifdef CONFIG_CIFS_STATS2
-		mid->when_received = jiffies;
+	mid->when_received = jiffies;
 #endif
-		list_del_init(&mid->qhead);
-		ret = mid;
-		break;
-	}
+	spin_lock(&GlobalMid_Lock);
+	if (!malformed)
+		mid->midState = MID_RESPONSE_RECEIVED;
+	else
+		mid->midState = MID_RESPONSE_MALFORMED;
+	list_del_init(&mid->qhead);
 	spin_unlock(&GlobalMid_Lock);
+}
 
-	return ret;
+static void
+handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+	   struct smb_hdr *buf, int malformed)
+{
+	if (malformed == 0 && check2ndT2(buf) > 0) {
+		mid->multiRsp = true;
+		if (mid->resp_buf) {
+			/* merge response - fix up 1st*/
+			malformed = coalesce_t2(buf, mid->resp_buf);
+			if (malformed > 0)
+				return;
+
+			/* All parts received or packet is malformed. */
+			mid->multiEnd = true;
+			return dequeue_mid(mid, malformed);
+		}
+		if (!server->large_buf) {
+			/*FIXME: switch to already allocated largebuf?*/
+			cERROR(1, "1st trans2 resp needs bigbuf");
+		} else {
+			/* Have first buffer */
+			mid->resp_buf = buf;
+			mid->largeBuf = true;
+			server->bigbuf = NULL;
+		}
+		return;
+	}
+	mid->resp_buf = buf;
+	mid->largeBuf = server->large_buf;
+	/* Was previous buf put in mpx struct for multi-rsp? */
+	if (!mid->multiRsp) {
+		/* smb buffer will be freed by user thread */
+		if (server->large_buf)
+			server->bigbuf = NULL;
+		else
+			server->smallbuf = NULL;
+	}
+	dequeue_mid(mid, malformed);
 }
 
 static void clean_demultiplex_info(struct TCP_Server_Info *server)
@@ -615,6 +695,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 	}
 
 	kfree(server->hostname);
+	kfree(server->iov);
 	kfree(server);
 
 	length = atomic_dec_return(&tcpSesAllocCount);
@@ -624,17 +705,70 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
 }
 
 static int
+standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	int length;
+	char *buf = server->smallbuf;
+	struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
+	unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
+
+	/* make sure this will fit in a large buffer */
+	if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+		cERROR(1, "SMB response too long (%u bytes)",
+		       pdu_length);
+		cifs_reconnect(server);
+		wake_up(&server->response_q);
+		return -EAGAIN;
+	}
+
+	/* switch to large buffer if too big for a small one */
+	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
+		server->large_buf = true;
+		memcpy(server->bigbuf, server->smallbuf, server->total_read);
+		buf = server->bigbuf;
+		smb_buffer = (struct smb_hdr *)buf;
+	}
+
+	/* now read the rest */
+	length = cifs_read_from_socket(server,
+				buf + sizeof(struct smb_hdr) - 1,
+				pdu_length - sizeof(struct smb_hdr) + 1 + 4);
+	if (length < 0)
+		return length;
+	server->total_read += length;
+
+	dump_smb(smb_buffer, server->total_read);
+
+	/*
+	 * We know that we received enough to get to the MID as we
+	 * checked the pdu_length earlier. Now check to see
+	 * if the rest of the header is OK. We borrow the length
+	 * var for the rest of the loop to avoid a new stack var.
+	 *
+	 * 48 bytes is enough to display the header and a little bit
+	 * into the payload for debugging purposes.
+	 */
+	length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
+	if (length != 0)
+		cifs_dump_mem("Bad SMB: ", buf,
+			min_t(unsigned int, server->total_read, 48));
+
+	if (mid)
+		handle_mid(mid, server, smb_buffer, length);
+
+	return length;
+}
+
+static int
 cifs_demultiplex_thread(void *p)
 {
 	int length;
 	struct TCP_Server_Info *server = p;
-	unsigned int pdu_length, total_read;
-	char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL;
+	unsigned int pdu_length;
+	char *buf = NULL;
 	struct smb_hdr *smb_buffer = NULL;
 	struct task_struct *task_to_wake = NULL;
 	struct mid_q_entry *mid_entry;
-	bool isLargeBuf = false;
-	bool isMultiRsp = false;
 
 	current->flags |= PF_MEMALLOC;
 	cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
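
A note on the to_read arithmetic in standard_receive3() above. A frame on the wire is 4 bytes of RFC1001 header plus pdu_length bytes of SMB data, and by the time standard_receive3() runs the demultiplex loop (later in this patch) has already pulled in those 4 bytes plus sizeof(struct smb_hdr) - 1 - 4 more, just enough to reach the MID, so server->total_read equals sizeof(struct smb_hdr) - 1. What is left to read is therefore

    (4 + pdu_length) - (sizeof(struct smb_hdr) - 1)
        = pdu_length - sizeof(struct smb_hdr) + 1 + 4

which is exactly the length passed to cifs_read_from_socket() above, starting at buf + sizeof(struct smb_hdr) - 1.
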
@@ -649,20 +783,18 @@ cifs_demultiplex_thread(void *p)
 		if (try_to_freeze())
 			continue;
 
-		if (!allocate_buffers(&bigbuf, &smallbuf,
-				      sizeof(struct smb_hdr), isLargeBuf))
+		if (!allocate_buffers(server))
 			continue;
 
-		isLargeBuf = false;
-		isMultiRsp = false;
-		smb_buffer = (struct smb_hdr *)smallbuf;
-		buf = smallbuf;
+		server->large_buf = false;
+		smb_buffer = (struct smb_hdr *)server->smallbuf;
+		buf = server->smallbuf;
 		pdu_length = 4; /* enough to get RFC1001 header */
 
-		length = read_from_socket(server, buf, pdu_length);
+		length = cifs_read_from_socket(server, buf, pdu_length);
 		if (length < 0)
 			continue;
-		total_read = length;
+		server->total_read = length;
 
 		/*
 		 * The right amount was read from socket - 4 bytes,
@@ -674,64 +806,42 @@ cifs_demultiplex_thread(void *p)
 		if (!is_smb_response(server, buf[0]))
 			continue;
 
-		/* check the length */
-		if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
-		    (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
-			cERROR(1, "Invalid size SMB length %d pdu_length %d",
-				4, pdu_length + 4);
+		/* make sure we have enough to get to the MID */
+		if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
+			cERROR(1, "SMB response too short (%u bytes)",
+				pdu_length);
 			cifs_reconnect(server);
 			wake_up(&server->response_q);
 			continue;
 		}
 
-		/* else length ok */
-		if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
-			isLargeBuf = true;
-			memcpy(bigbuf, smallbuf, 4);
-			smb_buffer = (struct smb_hdr *)bigbuf;
-			buf = bigbuf;
-		}
-
-		length = read_from_socket(server, buf + 4, pdu_length);
+		/* read down to the MID */
+		length = cifs_read_from_socket(server, buf + 4,
					sizeof(struct smb_hdr) - 1 - 4);
 		if (length < 0)
 			continue;
-		total_read += length;
+		server->total_read += length;
 
-		dump_smb(smb_buffer, total_read);
+		mid_entry = find_mid(server, smb_buffer);
 
-		/*
-		 * We know that we received enough to get to the MID as we
-		 * checked the pdu_length earlier. Now check to see
-		 * if the rest of the header is OK. We borrow the length
-		 * var for the rest of the loop to avoid a new stack var.
-		 *
-		 * 48 bytes is enough to display the header and a little bit
-		 * into the payload for debugging purposes.
-		 */
-		length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
-		if (length != 0)
-			cifs_dump_mem("Bad SMB: ", buf,
-				min_t(unsigned int, total_read, 48));
+		if (!mid_entry || !mid_entry->receive)
+			length = standard_receive3(server, mid_entry);
+		else
+			length = mid_entry->receive(server, mid_entry);
 
-		server->lstrp = jiffies;
+		if (length < 0)
+			continue;
 
-		mid_entry = find_cifs_mid(server, smb_buffer, &length,
-					  isLargeBuf, &isMultiRsp, &bigbuf);
+		if (server->large_buf) {
+			buf = server->bigbuf;
+			smb_buffer = (struct smb_hdr *)buf;
+		}
+
+		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
-			mid_entry->callback(mid_entry);
-			/* Was previous buf put in mpx struct for multi-rsp? */
-			if (!isMultiRsp) {
-				/* smb buffer will be freed by user thread */
-				if (isLargeBuf)
-					bigbuf = NULL;
-				else
-					smallbuf = NULL;
-			}
-		} else if (length != 0) {
-			/* response sanity checks failed */
-			continue;
-		} else if (!is_valid_oplock_break(smb_buffer, server) &&
-			   !isMultiRsp) {
+			if (!mid_entry->multiRsp || mid_entry->multiEnd)
+				mid_entry->callback(mid_entry);
+		} else if (!is_valid_oplock_break(smb_buffer, server)) {
 			cERROR(1, "No task to wake, unknown frame received! "
 				"NumMids %d", atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
@@ -745,9 +855,9 @@ cifs_demultiplex_thread(void *p)
 	} /* end while !EXITING */
 
 	/* buffer usually freed in free_mid - need to free it here on exit */
-	cifs_buf_release(bigbuf);
-	if (smallbuf) /* no sense logging a debug message if NULL */
-		cifs_small_buf_release(smallbuf);
+	cifs_buf_release(server->bigbuf);
+	if (server->smallbuf) /* no sense logging a debug message if NULL */
+		cifs_small_buf_release(server->smallbuf);
 
 	task_to_wake = xchg(&server->tsk, NULL);
 	clean_demultiplex_info(server);
@@ -2200,16 +2310,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
 	    (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
 		return 0;
 
-	if (old->rsize != new->rsize)
-		return 0;
-
 	/*
-	 * We want to share sb only if we don't specify wsize or specified wsize
-	 * is greater or equal than existing one.
+	 * We want to share sb only if we don't specify an r/wsize or
+	 * specified r/wsize is greater than or equal to existing one.
 	 */
 	if (new->wsize && new->wsize < old->wsize)
 		return 0;
 
+	if (new->rsize && new->rsize < old->rsize)
+		return 0;
+
 	if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
 		return 0;
 
@@ -2647,14 +2757,6 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
 					CIFS_MOUNT_POSIX_PATHS;
 		}
 
-		if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
-			if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
-				cifs_sb->rsize = 127 * 1024;
-				cFYI(DBG2, "larger reads not supported by srv");
-			}
-		}
-
-
 		cFYI(1, "Negotiate caps 0x%x", (int)cap);
 #ifdef CONFIG_CIFS_DEBUG2
 		if (cap & CIFS_UNIX_FCNTL_CAP)
@@ -2699,27 +2801,11 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
 	cifs_sb->tlink_tree = RB_ROOT;
 
-	if (pvolume_info->rsize > CIFSMaxBufSize) {
-		cERROR(1, "rsize %d too large, using MaxBufSize",
-			pvolume_info->rsize);
-		cifs_sb->rsize = CIFSMaxBufSize;
-	} else if ((pvolume_info->rsize) &&
-			(pvolume_info->rsize <= CIFSMaxBufSize))
-		cifs_sb->rsize = pvolume_info->rsize;
-	else /* default */
-		cifs_sb->rsize = CIFSMaxBufSize;
-
-	if (cifs_sb->rsize < 2048) {
-		cifs_sb->rsize = 2048;
-		/* Windows ME may prefer this */
-		cFYI(1, "readsize set to minimum: 2048");
-	}
-
 	/*
-	 * Temporarily set wsize for matching superblock. If we end up using
-	 * new sb then cifs_negotiate_wsize will later negotiate it downward
-	 * if needed.
+	 * Temporarily set r/wsize for matching superblock. If we end up using
+	 * new sb then client will later negotiate it downward if needed.
 	 */
+	cifs_sb->rsize = pvolume_info->rsize;
 	cifs_sb->wsize = pvolume_info->wsize;
 
 	cifs_sb->mnt_uid = pvolume_info->linux_uid;
@@ -2794,29 +2880,41 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 }
 
 /*
- * When the server supports very large writes via POSIX extensions, we can
- * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
- * the RFC1001 length.
+ * When the server supports very large reads and writes via POSIX extensions,
+ * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
+ * including the RFC1001 length.
  *
  * Note that this might make for "interesting" allocation problems during
  * writeback however as we have to allocate an array of pointers for the
  * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ *
+ * For reads, there is a similar problem as we need to allocate an array
+ * of kvecs to handle the receive, though that should only need to be done
+ * once.
  */
 #define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
+#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
 
 /*
- * When the server doesn't allow large posix writes, only allow a wsize of
- * 2^17-1 minus the size of the WRITE_AND_X header. That allows for a write up
- * to the maximum size described by RFC1002.
+ * When the server doesn't allow large posix writes, only allow a rsize/wsize
+ * of 2^17-1 minus the size of the call header. That allows for a read or
+ * write up to the maximum size described by RFC1002.
 */
 #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
+#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
  * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
  * a single wsize request with a single call.
 */
-#define CIFS_DEFAULT_WSIZE (1024 * 1024)
+#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+
+/*
+ * Windows only supports a max of 60k reads. Default to that when posix
+ * extensions aren't in force.
+ */
+#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
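
A note on the "+ 4" in these size macros: WRITE_REQ and READ_RSP both embed struct smb_hdr at the start, and its first field is the 4-byte RFC1001 length (the smb_buf_length that standard_receive3() earlier in this patch reads with be32_to_cpu()). The limits in the comments are described as excluding the RFC1001 length, so subtracting the full sizeof() of the request or response removes 4 bytes too many, and the + 4 puts them back. For the POSIX large-write cap, for example:

    CIFS_MAX_WSIZE = (2^24 - 1) - (sizeof(WRITE_REQ) - 4)

that is, 2^24 - 1 bytes less the on-the-wire WriteAndX header.
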
@@ -2824,7 +2922,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
 	struct TCP_Server_Info *server = tcon->ses->server;
 	unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
-				CIFS_DEFAULT_WSIZE;
+				CIFS_DEFAULT_IOSIZE;
 
 	/* can server support 24-bit write sizes? (via UNIX extensions) */
 	if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
@@ -2847,6 +2945,50 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 	return wsize;
 }
 
+static unsigned int
+cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+{
+	__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+	struct TCP_Server_Info *server = tcon->ses->server;
+	unsigned int rsize, defsize;
+
+	/*
+	 * Set default value...
+	 *
+	 * HACK alert! Ancient servers have very small buffers. Even though
+	 * MS-CIFS indicates that servers are only limited by the client's
+	 * bufsize for reads, testing against win98se shows that it throws
+	 * INVALID_PARAMETER errors if you try to request too large a read.
+	 *
+	 * If the server advertises a MaxBufferSize of less than one page,
+	 * assume that it also can't satisfy reads larger than that either.
+	 *
+	 * FIXME: Is there a better heuristic for this?
+	 */
+	if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
+		defsize = CIFS_DEFAULT_IOSIZE;
+	else if (server->capabilities & CAP_LARGE_READ_X)
+		defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
+	else if (server->maxBuf >= PAGE_CACHE_SIZE)
+		defsize = CIFSMaxBufSize;
+	else
+		defsize = server->maxBuf - sizeof(READ_RSP);
+
+	rsize = pvolume_info->rsize ? pvolume_info->rsize : defsize;
+
+	/*
+	 * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
+	 * the client's MaxBufferSize.
+	 */
+	if (!(server->capabilities & CAP_LARGE_READ_X))
+		rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+
+	/* hard limit of CIFS_MAX_RSIZE */
+	rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
+
+	return rsize;
+}
+
 static int
 is_path_accessible(int xid, struct cifs_tcon *tcon,
 		   struct cifs_sb_info *cifs_sb, const char *full_path)
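
For concreteness, the defsize ladder in cifs_negotiate_rsize() above can be exercised in isolation. The sketch below is a standalone userspace mock: the page size, buffer size and READ_RSP size are invented stand-in values, only the selection order mirrors the patch, and the two clamping steps at the end of the real function are left out.

    #include <stdio.h>

    /* illustrative stand-in values, not taken from the patch */
    #define PAGE_SIZE_ASSUMED	4096u
    #define DEFAULT_IOSIZE	(1024u * 1024)	/* CIFS_DEFAULT_IOSIZE */
    #define NON_POSIX_RSIZE	(60u * 1024)	/* CIFS_DEFAULT_NON_POSIX_RSIZE */
    #define MAX_BUF_ASSUMED	16384u		/* stand-in for CIFSMaxBufSize */
    #define READ_RSP_ASSUMED	64u		/* stand-in for sizeof(READ_RSP) */

    /* mirrors only the default-selection order of cifs_negotiate_rsize() */
    static unsigned int default_rsize(int posix_large_read, int cap_large_read_x,
                                      unsigned int server_max_buf)
    {
            if (posix_large_read)
                    return DEFAULT_IOSIZE;
            else if (cap_large_read_x)
                    return NON_POSIX_RSIZE;
            else if (server_max_buf >= PAGE_SIZE_ASSUMED)
                    return MAX_BUF_ASSUMED;
            else
                    return server_max_buf - READ_RSP_ASSUMED;
    }

    int main(void)
    {
            printf("posix large reads:  %u\n", default_rsize(1, 1, 65535));
            printf("windows server:     %u\n", default_rsize(0, 1, 16644));
            printf("tiny-buffer server: %u\n", default_rsize(0, 0, 2048));
            return 0;
    }

Run as-is this prints 1048576, 61440 and 1984: a POSIX-extensions server defaults to the 1M CIFS_DEFAULT_IOSIZE, a Windows-style server advertising CAP_LARGE_READ_X defaults to 60k, and an ancient small-buffer server only gets whatever read fits inside its advertised MaxBufferSize.
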
@@ -3040,6 +3182,22 @@ cifs_get_volume_info(char *mount_data, const char *devname)
 	return volume_info;
 }
 
+/* make sure ra_pages is a multiple of rsize */
+static inline unsigned int
+cifs_ra_pages(struct cifs_sb_info *cifs_sb)
+{
+	unsigned int reads;
+	unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
+
+	if (rsize_pages >= default_backing_dev_info.ra_pages)
+		return default_backing_dev_info.ra_pages;
+	else if (rsize_pages == 0)
+		return rsize_pages;
+
+	reads = default_backing_dev_info.ra_pages / rsize_pages;
+	return reads * rsize_pages;
+}
+
 int
 cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 {
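
A worked example of cifs_ra_pages(): with 4k pages and the 60k rsize that cifs_negotiate_rsize() picks for a typical Windows server, rsize_pages = 61440 / 4096 = 15. Assuming the usual 128k default readahead window (default_backing_dev_info.ra_pages == 32), reads = 32 / 15 = 2, so ra_pages becomes 2 * 15 = 30 pages (120k) rather than 32. In other words, the readahead window is rounded down to an exact multiple of rsize so that each readahead pass maps onto whole read requests.
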
@@ -3058,8 +3216,6 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
 	if (rc)
 		return rc;
 
-	cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
-
 #ifdef CONFIG_CIFS_DFS_UPCALL
 try_mount_again:
 	/* cleanup activities if we're chasing a referral */
@@ -3124,14 +3280,11 @@ try_mount_again:
 		CIFSSMBQFSAttributeInfo(xid, tcon);
 	}
 
-	if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
-		cifs_sb->rsize = 1024 * 127;
-		cFYI(DBG2, "no very large read support, rsize now 127K");
-	}
-	if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
-		cifs_sb->rsize = min(cifs_sb->rsize, CIFSMaxBufSize);
-
 	cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
+	cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
+
+	/* tune readahead according to rsize */
+	cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb);
 
 remote_path_check:
 #ifdef CONFIG_CIFS_DFS_UPCALL