Diffstat (limited to 'drivers/scsi/storvsc_drv.c')
 -rw-r--r--  drivers/scsi/storvsc_drv.c  294
 1 file changed, 10 insertions(+), 284 deletions(-)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 40c43aeb4ff3..3fba42ad9fb8 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -349,11 +349,14 @@ enum storvsc_request_type {
  */
 
 #define SRB_STATUS_AUTOSENSE_VALID	0x80
+#define SRB_STATUS_QUEUE_FROZEN		0x40
 #define SRB_STATUS_INVALID_LUN	0x20
 #define SRB_STATUS_SUCCESS	0x01
 #define SRB_STATUS_ABORTED	0x02
 #define SRB_STATUS_ERROR	0x04
 
+#define SRB_STATUS(status) \
+	(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
 /*
  * This is the end of Protocol specific defines.
  */
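
The new SRB_STATUS() helper masks off the SRB_STATUS_AUTOSENSE_VALID and SRB_STATUS_QUEUE_FROZEN modifier bits, so a raw status that carries those bits still compares equal to the base codes (SRB_STATUS_ERROR, SRB_STATUS_SUCCESS, ...) in a switch. A minimal stand-alone sketch of that masking, with the constants copied from the hunk above and an illustrative main() harness:

#include <stdio.h>

/* Values mirrored from the hunk above */
#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_QUEUE_FROZEN		0x40
#define SRB_STATUS_ERROR		0x04

#define SRB_STATUS(status) \
	(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))

int main(void)
{
	/* 0xC4 = ERROR with both modifier bits set; masking recovers 0x04 */
	unsigned char raw = SRB_STATUS_ERROR | SRB_STATUS_AUTOSENSE_VALID |
			    SRB_STATUS_QUEUE_FROZEN;

	printf("raw=0x%02x base=0x%02x\n", raw, SRB_STATUS(raw));
	return 0;
}
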
@@ -393,9 +396,6 @@ static void storvsc_on_channel_callback(void *context);
 struct storvsc_cmd_request {
 	struct scsi_cmnd *cmd;
 
-	unsigned int bounce_sgl_count;
-	struct scatterlist *bounce_sgl;
-
 	struct hv_device *device;
 
 	/* Synchronize the request/response if needed */
@@ -586,241 +586,6 @@ get_in_err:
 
 }
 
-static void destroy_bounce_buffer(struct scatterlist *sgl,
-				  unsigned int sg_count)
-{
-	int i;
-	struct page *page_buf;
-
-	for (i = 0; i < sg_count; i++) {
-		page_buf = sg_page((&sgl[i]));
-		if (page_buf != NULL)
-			__free_page(page_buf);
-	}
-
-	kfree(sgl);
-}
-
-static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
-{
-	int i;
-
-	/* No need to check */
-	if (sg_count < 2)
-		return -1;
-
-	/* We have at least 2 sg entries */
-	for (i = 0; i < sg_count; i++) {
-		if (i == 0) {
-			/* make sure 1st one does not have hole */
-			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
-				return i;
-		} else if (i == sg_count - 1) {
-			/* make sure last one does not have hole */
-			if (sgl[i].offset != 0)
-				return i;
-		} else {
-			/* make sure no hole in the middle */
-			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
-				return i;
-		}
-	}
-	return -1;
-}
-
-static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
-						unsigned int sg_count,
-						unsigned int len,
-						int write)
-{
-	int i;
-	int num_pages;
-	struct scatterlist *bounce_sgl;
-	struct page *page_buf;
-	unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
-
-	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
-
-	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
-	if (!bounce_sgl)
-		return NULL;
-
-	sg_init_table(bounce_sgl, num_pages);
-	for (i = 0; i < num_pages; i++) {
-		page_buf = alloc_page(GFP_ATOMIC);
-		if (!page_buf)
-			goto cleanup;
-		sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
-	}
-
-	return bounce_sgl;
-
-cleanup:
-	destroy_bounce_buffer(bounce_sgl, num_pages);
-	return NULL;
-}
-
-/* Assume the original sgl has enough room */
-static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
-					    struct scatterlist *bounce_sgl,
-					    unsigned int orig_sgl_count,
-					    unsigned int bounce_sgl_count)
-{
-	int i;
-	int j = 0;
-	unsigned long src, dest;
-	unsigned int srclen, destlen, copylen;
-	unsigned int total_copied = 0;
-	unsigned long bounce_addr = 0;
-	unsigned long dest_addr = 0;
-	unsigned long flags;
-	struct scatterlist *cur_dest_sgl;
-	struct scatterlist *cur_src_sgl;
-
-	local_irq_save(flags);
-	cur_dest_sgl = orig_sgl;
-	cur_src_sgl = bounce_sgl;
-	for (i = 0; i < orig_sgl_count; i++) {
-		dest_addr = (unsigned long)
-				kmap_atomic(sg_page(cur_dest_sgl)) +
-				cur_dest_sgl->offset;
-		dest = dest_addr;
-		destlen = cur_dest_sgl->length;
-
-		if (bounce_addr == 0)
-			bounce_addr = (unsigned long)kmap_atomic(
-							sg_page(cur_src_sgl));
-
-		while (destlen) {
-			src = bounce_addr + cur_src_sgl->offset;
-			srclen = cur_src_sgl->length - cur_src_sgl->offset;
-
-			copylen = min(srclen, destlen);
-			memcpy((void *)dest, (void *)src, copylen);
-
-			total_copied += copylen;
-			cur_src_sgl->offset += copylen;
-			destlen -= copylen;
-			dest += copylen;
-
-			if (cur_src_sgl->offset == cur_src_sgl->length) {
-				/* full */
-				kunmap_atomic((void *)bounce_addr);
-				j++;
-
-				/*
-				 * It is possible that the number of elements
-				 * in the bounce buffer may not be equal to
-				 * the number of elements in the original
-				 * scatter list. Handle this correctly.
-				 */
-
-				if (j == bounce_sgl_count) {
-					/*
-					 * We are done; cleanup and return.
-					 */
-					kunmap_atomic((void *)(dest_addr -
-							cur_dest_sgl->offset));
-					local_irq_restore(flags);
-					return total_copied;
-				}
-
-				/* if we need to use another bounce buffer */
-				if (destlen || i != orig_sgl_count - 1) {
-					cur_src_sgl = sg_next(cur_src_sgl);
-					bounce_addr = (unsigned long)
-							kmap_atomic(
-							sg_page(cur_src_sgl));
-				}
-			} else if (destlen == 0 && i == orig_sgl_count - 1) {
-				/* unmap the last bounce that is < PAGE_SIZE */
-				kunmap_atomic((void *)bounce_addr);
-			}
-		}
-
-		kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
-		cur_dest_sgl = sg_next(cur_dest_sgl);
-	}
-
-	local_irq_restore(flags);
-
-	return total_copied;
-}
-
-/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
-static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
-					  struct scatterlist *bounce_sgl,
-					  unsigned int orig_sgl_count)
-{
-	int i;
-	int j = 0;
-	unsigned long src, dest;
-	unsigned int srclen, destlen, copylen;
-	unsigned int total_copied = 0;
-	unsigned long bounce_addr = 0;
-	unsigned long src_addr = 0;
-	unsigned long flags;
-	struct scatterlist *cur_src_sgl;
-	struct scatterlist *cur_dest_sgl;
-
-	local_irq_save(flags);
-
-	cur_src_sgl = orig_sgl;
-	cur_dest_sgl = bounce_sgl;
-
-	for (i = 0; i < orig_sgl_count; i++) {
-		src_addr = (unsigned long)
-				kmap_atomic(sg_page(cur_src_sgl)) +
-				cur_src_sgl->offset;
-		src = src_addr;
-		srclen = cur_src_sgl->length;
-
-		if (bounce_addr == 0)
-			bounce_addr = (unsigned long)
-					kmap_atomic(sg_page(cur_dest_sgl));
-
-		while (srclen) {
-			/* assume bounce offset always == 0 */
-			dest = bounce_addr + cur_dest_sgl->length;
-			destlen = PAGE_SIZE - cur_dest_sgl->length;
-
-			copylen = min(srclen, destlen);
-			memcpy((void *)dest, (void *)src, copylen);
-
-			total_copied += copylen;
-			cur_dest_sgl->length += copylen;
-			srclen -= copylen;
-			src += copylen;
-
-			if (cur_dest_sgl->length == PAGE_SIZE) {
-				/* full..move to next entry */
-				kunmap_atomic((void *)bounce_addr);
-				bounce_addr = 0;
-				j++;
-			}
-
-			/* if we need to use another bounce buffer */
-			if (srclen && bounce_addr == 0) {
-				cur_dest_sgl = sg_next(cur_dest_sgl);
-				bounce_addr = (unsigned long)
-						kmap_atomic(
-						sg_page(cur_dest_sgl));
-			}
-
-		}
-
-		kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
-		cur_src_sgl = sg_next(cur_src_sgl);
-	}
-
-	if (bounce_addr)
-		kunmap_atomic((void *)bounce_addr);
-
-	local_irq_restore(flags);
-
-	return total_copied;
-}
-
 static void handle_sc_creation(struct vmbus_channel *new_sc)
 {
 	struct hv_device *device = new_sc->primary_channel->device_obj;
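
The removed do_bounce_buffer() scanned a scatterlist for "holes": only the first element may start mid-page, only the last may end mid-page, and everything in between must cover whole pages; any other layout forced the data through freshly allocated bounce pages (create_bounce_buffer() plus the copy_to/from helpers above). A small stand-alone sketch of that gap test on plain offset/length pairs, using an illustrative struct rather than the kernel's scatterlist:

#include <stdio.h>

#define PAGE_SIZE_EX 4096	/* illustrative stand-in for PAGE_SIZE */

struct seg { unsigned int offset, length; };

/* Return the index of the first element that creates a gap, or -1. */
static int find_gap(const struct seg *sg, int count)
{
	for (int i = 0; i < count; i++) {
		if (i == 0 && count > 1 &&
		    sg[i].offset + sg[i].length != PAGE_SIZE_EX)
			return i;	/* first element ends mid-page */
		if (i == count - 1 && i > 0 && sg[i].offset != 0)
			return i;	/* last element starts mid-page */
		if (i > 0 && i < count - 1 &&
		    (sg[i].offset != 0 || sg[i].length != PAGE_SIZE_EX))
			return i;	/* middle element is not a full page */
	}
	return -1;
}

int main(void)
{
	struct seg ok[]  = { {512, 3584}, {0, 4096}, {0, 100} };
	struct seg bad[] = { {512, 1024}, {0, 4096} };	/* first ends mid-page */

	printf("ok:  gap at %d\n", find_gap(ok, 3));	/* -1 */
	printf("bad: gap at %d\n", find_gap(bad, 2));	/*  0 */
	return 0;
}
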
@@ -1096,7 +861,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
 	void (*process_err_fn)(struct work_struct *work);
 	bool do_work = false;
 
-	switch (vm_srb->srb_status) {
+	switch (SRB_STATUS(vm_srb->srb_status)) {
 	case SRB_STATUS_ERROR:
 		/*
 		 * If there is an error; offline the device since all
@@ -1171,15 +936,6 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
 	host = stor_dev->host;
 
 	vm_srb = &cmd_request->vstor_packet.vm_srb;
-	if (cmd_request->bounce_sgl_count) {
-		if (vm_srb->data_in == READ_TYPE)
-			copy_from_bounce_buffer(scsi_sglist(scmnd),
-					cmd_request->bounce_sgl,
-					scsi_sg_count(scmnd),
-					cmd_request->bounce_sgl_count);
-		destroy_bounce_buffer(cmd_request->bounce_sgl,
-					cmd_request->bounce_sgl_count);
-	}
 
 	scmnd->result = vm_srb->scsi_status;
 
@@ -1474,6 +1230,9 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
 	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
 
+	/* Ensure there are no gaps in presented sgls */
+	blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);
+
 	sdevice->no_write_same = 1;
 
 	/*
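
blk_queue_virt_boundary() with a mask of PAGE_SIZE - 1 tells the block layer that adjacent scatter-gather segments may only meet on page boundaries, so a merged request can never contain the page-internal gaps that do_bounce_buffer() used to detect; this guarantee is what makes the bounce-buffer machinery removable. A hedged sketch of a configure hook in a hypothetical LLD applying the same constraint (only blk_queue_virt_boundary() and struct scsi_device are real kernel interfaces here):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/*
 * Sketch only: a hypothetical ->slave_configure-style hook for a driver
 * that, like storvsc, cannot describe a hole inside a request to its
 * transport.
 */
static int example_device_configure(struct scsi_device *sdevice)
{
	/*
	 * Segments may only join at page boundaries, so no sg element
	 * other than the first starts mid-page and none but the last
	 * ends mid-page.
	 */
	blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);
	return 0;
}
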
@@ -1647,8 +1406,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	vm_srb->win8_extension.time_out_value = 60;
 
 	vm_srb->win8_extension.srb_flags |=
-		(SRB_FLAGS_QUEUE_ACTION_ENABLE |
-		SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+		SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
 
 	/* Build the SRB */
 	switch (scmnd->sc_data_direction) {
@@ -1692,40 +1450,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	payload_sz = sizeof(cmd_request->mpb);
 
 	if (sg_count) {
-		/* check if we need to bounce the sgl */
-		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
-			cmd_request->bounce_sgl =
-				create_bounce_buffer(sgl, sg_count,
-						     length,
-						     vm_srb->data_in);
-			if (!cmd_request->bounce_sgl)
-				return SCSI_MLQUEUE_HOST_BUSY;
-
-			cmd_request->bounce_sgl_count =
-				ALIGN(length, PAGE_SIZE) >> PAGE_SHIFT;
-
-			if (vm_srb->data_in == WRITE_TYPE)
-				copy_to_bounce_buffer(sgl,
-					cmd_request->bounce_sgl, sg_count);
-
-			sgl = cmd_request->bounce_sgl;
-			sg_count = cmd_request->bounce_sgl_count;
-		}
-
-
 		if (sg_count > MAX_PAGE_BUFFER_COUNT) {
 
 			payload_sz = (sg_count * sizeof(void *) +
 				      sizeof(struct vmbus_packet_mpb_array));
 			payload = kmalloc(payload_sz, GFP_ATOMIC);
-			if (!payload) {
-				if (cmd_request->bounce_sgl_count)
-					destroy_bounce_buffer(
-					cmd_request->bounce_sgl,
-					cmd_request->bounce_sgl_count);
-
-				return SCSI_MLQUEUE_DEVICE_BUSY;
-			}
+			if (!payload)
+				return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
 
 		payload->range.len = length;
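
When the scatterlist has more than MAX_PAGE_BUFFER_COUNT entries, the driver switches from the embedded cmd_request->mpb descriptor to a kmalloc'ed header plus one pointer-sized slot per sg element, hence sg_count * sizeof(void *) + sizeof(struct vmbus_packet_mpb_array). A user-space sketch of that flexible-array sizing pattern; the struct below is illustrative, not the real vmbus definition:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Hypothetical stand-in: a header followed by one PFN per sg element. */
struct mpb_array_example {
	uint32_t count;
	uint64_t pfn_array[];	/* flexible array member */
};

int main(void)
{
	unsigned int sg_count = 512;	/* arbitrary example */
	size_t payload_sz = sizeof(struct mpb_array_example) +
			    sg_count * sizeof(uint64_t);
	struct mpb_array_example *payload = malloc(payload_sz);

	if (!payload)
		return 1;	/* analogous to returning SCSI_MLQUEUE_DEVICE_BUSY */

	payload->count = sg_count;
	printf("allocated %zu bytes for %u entries\n", payload_sz, sg_count);
	free(payload);
	return 0;
}
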
@@ -1754,11 +1485,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 
 	if (ret == -EAGAIN) {
 		/* no more space */
-
-		if (cmd_request->bounce_sgl_count)
-			destroy_bounce_buffer(cmd_request->bounce_sgl,
-				cmd_request->bounce_sgl_count);
-
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
 