author     K. Y. Srinivasan <kys@microsoft.com>  2015-03-27 03:27:19 -0400
committer  James Bottomley <JBottomley@Odin.com>  2015-04-09 16:22:12 -0400
commit     aaced9934d2231227fec4255f394eb86af818ce2 (patch)
tree       433797b74824777e98414450a710227be14f7811
parent     8de580742fee8bc34d116f57a20b22b9a5f08403 (diff)
scsi: storvsc: Don't assume that the scatterlist is not chained
The current code assumes that the scatterlists presented are not chained.
Fix the code to not make this assumption.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
-rw-r--r--  drivers/scsi/storvsc_drv.c | 99
1 file changed, 57 insertions(+), 42 deletions(-)
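The core of the change: a scatterlist may be chained, i.e. its entries are split across several memory chunks linked together by a chain entry, so treating it as a plain array (sgl[i] or sgl + i) can walk past the end of a chunk or read a chain link as if it were data. Walking the list with sg_next() (or for_each_sg(), which uses sg_next() internally) follows the chain correctly. A minimal illustrative sketch, not part of the patch; the helper name count_sg_bytes is made up for this example:

#include <linux/scatterlist.h>

/* Sum the byte lengths of all entries; safe for chained scatterlists. */
static unsigned long count_sg_bytes(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg;
        unsigned long total = 0;
        unsigned int i;

        /*
         * for_each_sg() advances with sg_next(), which follows chain
         * entries instead of treating them as data; sgl[i] would not.
         */
        for_each_sg(sgl, sg, nents, i)
                total += sg->length;

        return total;
}

The patch below applies the same idea: every place that indexed orig_sgl[i] or bounce_sgl[j] now keeps a cursor (cur_src_sgl, cur_dest_sgl, cur_sgl) and advances it with sg_next().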
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f8e4dd925a9d..4156e297ea64 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -626,19 +626,6 @@ cleanup:
 	return NULL;
 }
 
-/* Disgusting wrapper functions */
-static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
-{
-	void *addr = kmap_atomic(sg_page(sgl + idx));
-	return (unsigned long)addr;
-}
-
-static inline void sg_kunmap_atomic(unsigned long addr)
-{
-	kunmap_atomic((void *)addr);
-}
-
-
 /* Assume the original sgl has enough room */
 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 					    struct scatterlist *bounce_sgl,
@@ -653,32 +640,38 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 	unsigned long bounce_addr = 0;
 	unsigned long dest_addr = 0;
 	unsigned long flags;
+	struct scatterlist *cur_dest_sgl;
+	struct scatterlist *cur_src_sgl;
 
 	local_irq_save(flags);
-
+	cur_dest_sgl = orig_sgl;
+	cur_src_sgl = bounce_sgl;
 	for (i = 0; i < orig_sgl_count; i++) {
-		dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+		dest_addr = (unsigned long)
+				kmap_atomic(sg_page(cur_dest_sgl)) +
+				cur_dest_sgl->offset;
 		dest = dest_addr;
-		destlen = orig_sgl[i].length;
+		destlen = cur_dest_sgl->length;
 
 		if (bounce_addr == 0)
-			bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+			bounce_addr = (unsigned long)kmap_atomic(
+							sg_page(cur_src_sgl));
 
 		while (destlen) {
-			src = bounce_addr + bounce_sgl[j].offset;
-			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+			src = bounce_addr + cur_src_sgl->offset;
+			srclen = cur_src_sgl->length - cur_src_sgl->offset;
 
 			copylen = min(srclen, destlen);
 			memcpy((void *)dest, (void *)src, copylen);
 
 			total_copied += copylen;
-			bounce_sgl[j].offset += copylen;
+			cur_src_sgl->offset += copylen;
 			destlen -= copylen;
 			dest += copylen;
 
-			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+			if (cur_src_sgl->offset == cur_src_sgl->length) {
 				/* full */
-				sg_kunmap_atomic(bounce_addr);
+				kunmap_atomic((void *)bounce_addr);
 				j++;
 
 				/*
@@ -692,21 +685,27 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 					/*
 					 * We are done; cleanup and return.
 					 */
-					sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+					kunmap_atomic((void *)(dest_addr -
+						cur_dest_sgl->offset));
 					local_irq_restore(flags);
 					return total_copied;
 				}
 
 				/* if we need to use another bounce buffer */
-				if (destlen || i != orig_sgl_count - 1)
-					bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+				if (destlen || i != orig_sgl_count - 1) {
+					cur_src_sgl = sg_next(cur_src_sgl);
+					bounce_addr = (unsigned long)
+							kmap_atomic(
+							sg_page(cur_src_sgl));
+				}
 			} else if (destlen == 0 && i == orig_sgl_count - 1) {
 				/* unmap the last bounce that is < PAGE_SIZE */
-				sg_kunmap_atomic(bounce_addr);
+				kunmap_atomic((void *)bounce_addr);
 			}
 		}
 
-		sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+		kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
+		cur_dest_sgl = sg_next(cur_dest_sgl);
 	}
 
 	local_irq_restore(flags);
@@ -727,48 +726,61 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 	unsigned long bounce_addr = 0;
 	unsigned long src_addr = 0;
 	unsigned long flags;
+	struct scatterlist *cur_src_sgl;
+	struct scatterlist *cur_dest_sgl;
 
 	local_irq_save(flags);
 
+	cur_src_sgl = orig_sgl;
+	cur_dest_sgl = bounce_sgl;
+
 	for (i = 0; i < orig_sgl_count; i++) {
-		src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+		src_addr = (unsigned long)
+				kmap_atomic(sg_page(cur_src_sgl)) +
+				cur_src_sgl->offset;
 		src = src_addr;
-		srclen = orig_sgl[i].length;
+		srclen = cur_src_sgl->length;
 
 		if (bounce_addr == 0)
-			bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+			bounce_addr = (unsigned long)
+					kmap_atomic(sg_page(cur_dest_sgl));
 
 		while (srclen) {
 			/* assume bounce offset always == 0 */
-			dest = bounce_addr + bounce_sgl[j].length;
-			destlen = PAGE_SIZE - bounce_sgl[j].length;
+			dest = bounce_addr + cur_dest_sgl->length;
+			destlen = PAGE_SIZE - cur_dest_sgl->length;
 
 			copylen = min(srclen, destlen);
 			memcpy((void *)dest, (void *)src, copylen);
 
 			total_copied += copylen;
-			bounce_sgl[j].length += copylen;
+			cur_dest_sgl->length += copylen;
 			srclen -= copylen;
 			src += copylen;
 
-			if (bounce_sgl[j].length == PAGE_SIZE) {
+			if (cur_dest_sgl->length == PAGE_SIZE) {
 				/* full..move to next entry */
-				sg_kunmap_atomic(bounce_addr);
+				kunmap_atomic((void *)bounce_addr);
 				bounce_addr = 0;
 				j++;
 			}
 
 			/* if we need to use another bounce buffer */
-			if (srclen && bounce_addr == 0)
-				bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+			if (srclen && bounce_addr == 0) {
+				cur_dest_sgl = sg_next(cur_dest_sgl);
+				bounce_addr = (unsigned long)
+						kmap_atomic(
+						sg_page(cur_dest_sgl));
+			}
 
 		}
 
-		sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+		kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
+		cur_src_sgl = sg_next(cur_src_sgl);
 	}
 
 	if (bounce_addr)
-		sg_kunmap_atomic(bounce_addr);
+		kunmap_atomic((void *)bounce_addr);
 
 	local_irq_restore(flags);
 
@@ -1536,6 +1548,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	struct scatterlist *sgl;
 	unsigned int sg_count = 0;
 	struct vmscsi_request *vm_srb;
+	struct scatterlist *cur_sgl;
 
 	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
 		/*
@@ -1617,10 +1630,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		}
 
 		cmd_request->data_buffer.offset = sgl[0].offset;
-
-		for (i = 0; i < sg_count; i++)
+		cur_sgl = sgl;
+		for (i = 0; i < sg_count; i++) {
 			cmd_request->data_buffer.pfn_array[i] =
-				page_to_pfn(sg_page((&sgl[i])));
+				page_to_pfn(sg_page((cur_sgl)));
+			cur_sgl = sg_next(cur_sgl);
+		}
 
 	} else if (scsi_sglist(scmnd)) {
 		cmd_request->data_buffer.offset =