 drivers/scsi/scsi_transport_spi.c | 93
 1 file changed, 63 insertions(+), 30 deletions(-)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 67c6cc40ce16..2918b9600db7 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -348,17 +348,21 @@ spi_transport_rd_attr(rd_strm, "%d\n");
 spi_transport_rd_attr(rti, "%d\n");
 spi_transport_rd_attr(pcomp_en, "%d\n");
 
+/* we only care about the first child device so we return 1 */
+static int child_iter(struct device *dev, void *data)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	spi_dv_device(sdev);
+	return 1;
+}
+
 static ssize_t
 store_spi_revalidate(struct class_device *cdev, const char *buf, size_t count)
 {
 	struct scsi_target *starget = transport_class_to_starget(cdev);
 
-	/* FIXME: we're relying on an awful lot of device internals
-	 * here.  We really need a function to get the first available
-	 * child */
-	struct device *dev = container_of(starget->dev.children.next, struct device, node);
-	struct scsi_device *sdev = to_scsi_device(dev);
-	spi_dv_device(sdev);
+	device_for_each_child(&starget->dev, NULL, child_iter);
 	return count;
 }
 static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
@@ -669,6 +673,7 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
 {
 	struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
 	struct scsi_device *sdev = sreq->sr_device;
+	struct scsi_target *starget = sdev->sdev_target;
 	int period = 0, prevperiod = 0;
 	enum spi_compare_returns retval;
 
@@ -682,24 +687,40 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
 			break;
 
 		/* OK, retrain, fallback */
+		if (i->f->get_iu)
+			i->f->get_iu(starget);
+		if (i->f->get_qas)
+			i->f->get_qas(starget);
 		if (i->f->get_period)
 			i->f->get_period(sdev->sdev_target);
-		newperiod = spi_period(sdev->sdev_target);
-		period = newperiod > period ? newperiod : period;
-		if (period < 0x0d)
-			period++;
-		else
-			period += period >> 1;
-
-		if (unlikely(period > 0xff || period == prevperiod)) {
-			/* Total failure; set to async and return */
-			SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation Failure, dropping back to Asynchronous\n");
-			DV_SET(offset, 0);
-			return SPI_COMPARE_FAILURE;
+
+		/* Here's the fallback sequence; first try turning off
+		 * IU, then QAS (if we can control them), then finally
+		 * fall down the periods */
+		if (i->f->set_iu && spi_iu(starget)) {
+			SPI_PRINTK(starget, KERN_ERR, "Domain Validation Disabling Information Units\n");
+			DV_SET(iu, 0);
+		} else if (i->f->set_qas && spi_qas(starget)) {
+			SPI_PRINTK(starget, KERN_ERR, "Domain Validation Disabling Quick Arbitration and Selection\n");
+			DV_SET(qas, 0);
+		} else {
+			newperiod = spi_period(starget);
+			period = newperiod > period ? newperiod : period;
+			if (period < 0x0d)
+				period++;
+			else
+				period += period >> 1;
+
+			if (unlikely(period > 0xff || period == prevperiod)) {
+				/* Total failure; set to async and return */
+				SPI_PRINTK(starget, KERN_ERR, "Domain Validation Failure, dropping back to Asynchronous\n");
+				DV_SET(offset, 0);
+				return SPI_COMPARE_FAILURE;
+			}
+			SPI_PRINTK(starget, KERN_ERR, "Domain Validation detected failure, dropping back\n");
+			DV_SET(period, period);
+			prevperiod = period;
 		}
-		SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation detected failure, dropping back\n");
-		DV_SET(period, period);
-		prevperiod = period;
 	}
 	return retval;
 }
@@ -768,23 +789,21 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 
 	if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS)
 	    != SPI_COMPARE_SUCCESS) {
-		SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
+		SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
 		/* FIXME: should probably offline the device here? */
 		return;
 	}
 
 	/* test width */
 	if (i->f->set_width && spi_max_width(starget) && sdev->wdtr) {
-		i->f->set_width(sdev->sdev_target, 1);
-
-		printk("WIDTH IS %d\n", spi_max_width(starget));
+		i->f->set_width(starget, 1);
 
 		if (spi_dv_device_compare_inquiry(sreq, buffer,
 						  buffer + len,
 						  DV_LOOPS)
 		    != SPI_COMPARE_SUCCESS) {
-			SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Wide Transfers Fail\n");
-			i->f->set_width(sdev->sdev_target, 0);
+			SPI_PRINTK(starget, KERN_ERR, "Wide Transfers Fail\n");
+			i->f->set_width(starget, 0);
 		}
 	}
 
@@ -792,7 +811,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 		return;
 
 	/* device can't handle synchronous */
-	if(!sdev->ppr && !sdev->sdtr)
+	if (!sdev->ppr && !sdev->sdtr)
 		return;
 
 	/* see if the device has an echo buffer.  If it does we can
@@ -807,16 +826,30 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 	/* now set up to the maximum */
 	DV_SET(offset, spi_max_offset(starget));
 	DV_SET(period, spi_min_period(starget));
+	/* try QAS requests; this should be harmless to set if the
+	 * target supports it */
+	DV_SET(qas, 1);
+	/* Also try IU transfers */
+	DV_SET(iu, 1);
+	if (spi_min_period(starget) < 9) {
+		/* This is u320 (or u640).  Ignore the coupled parameters
+		 * like DT and IU, but set the optional ones */
+		DV_SET(rd_strm, 1);
+		DV_SET(wr_flow, 1);
+		DV_SET(rti, 1);
+		if (spi_min_period(starget) == 8)
+			DV_SET(pcomp_en, 1);
+	}
 
 	if (len == 0) {
-		SPI_PRINTK(sdev->sdev_target, KERN_INFO, "Domain Validation skipping write tests\n");
+		SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n");
 		spi_dv_retrain(sreq, buffer, buffer + len,
 			       spi_dv_device_compare_inquiry);
 		return;
 	}
 
 	if (len > SPI_MAX_ECHO_BUFFER_SIZE) {
-		SPI_PRINTK(sdev->sdev_target, KERN_WARNING, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
+		SPI_PRINTK(starget, KERN_WARNING, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
 		len = SPI_MAX_ECHO_BUFFER_SIZE;
 	}
 