path: root/drivers/scsi/mvsas.c
author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-04-19 12:17:29 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-04-19 12:17:29 -0400
commit		adf6d34e460387ee3e8f1e1875d52bff51212c7d (patch)
tree		88ef100143e6184103a608f82dfd232bf6376eaf /drivers/scsi/mvsas.c
parent		d1964dab60ce7c104dd21590e987a8787db18051 (diff)
parent		3760d31f11bfbd0ead9eaeb8573e0602437a9d7c (diff)
Merge branch 'omap2-upstream' into devel
Diffstat (limited to 'drivers/scsi/mvsas.c')
-rw-r--r--	drivers/scsi/mvsas.c	711
1 file changed, 481 insertions(+), 230 deletions(-)
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 5ec0665b3a3d..e55b9037adb2 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -37,11 +37,13 @@
 #include <linux/dma-mapping.h>
 #include <linux/ctype.h>
 #include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
 #include <asm/io.h>
 
 #define DRV_NAME	"mvsas"
-#define DRV_VERSION	"0.5.1"
+#define DRV_VERSION	"0.5.2"
 #define _MV_DUMP	0
 #define MVS_DISABLE_NVRAM
 #define MVS_DISABLE_MSI
 
@@ -52,7 +54,7 @@
 	readl(regs + MVS_##reg); \
 	} while (0)
 
-#define MVS_ID_NOT_MAPPED	0xff
+#define MVS_ID_NOT_MAPPED	0x7f
 #define MVS_CHIP_SLOT_SZ	(1U << mvi->chip->slot_width)
 
 /* offset for D2H FIS in the Received FIS List Structure */
@@ -84,6 +86,7 @@ enum driver_configuration {
 	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */
 
 	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
+	MVS_CAN_QUEUE		= MVS_SLOTS - 1, /* SCSI Queue depth */
 };
 
 /* unchangeable hardware details */
@@ -358,7 +361,20 @@ enum hw_register_bits {
 
 	/* VSR */
 	/* PHYMODE 6 (CDB) */
-	PHY_MODE6_DTL_SPEED	= (1U << 27),
+	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
+	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
+	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order*/
+	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
+	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
+	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
+	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
+	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
+	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
+	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
+	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
+	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
+	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
+	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
 };
 
 enum mvs_info_flags {
@@ -511,7 +527,43 @@ enum status_buffer {
 };
 
 enum error_info_rec {
 	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
+	CMD_PI_ERR	= (1U << 30),	/* Protection info error. see flags2 */
+	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
+	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
+	UNK_FIS		= (1U << 27),	/* unknown FIS */
+	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
+	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
+	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
+	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
+	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
+	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
+	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
+	DATA_OVER_UNDER	= (1U << 16),	/* data overflow/underflow */
+	INTERLOCK	= (1U << 15),	/* interlock error */
+	NAK		= (1U << 14),	/* NAK rx'd */
+	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
+	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
+	OPEN_TO		= (1U << 11),	/* I_T nexus lost, open cxn timeout */
+	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
+	NO_DEST		= (1U << 9),	/* I_T nexus lost, no destination */
+	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
+	BREAK		= (1U << 7),	/* break received */
+	BAD_DEST	= (1U << 6),	/* bad destination */
+	BAD_PROTO	= (1U << 5),	/* protocol not supported */
+	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
+	WRONG_DEST	= (1U << 3),	/* wrong destination error */
+	CREDIT_TO	= (1U << 2),	/* credit timeout */
+	WDOG_TO		= (1U << 1),	/* watchdog timeout */
+	BUF_PAR		= (1U << 0),	/* buffer parity error */
+};
+
+enum error_info_rec_2 {
+	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
+	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
+	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
+	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
+	USR_BLK_NM	= (1U << 0),	/* User Block Number */
 };
 
 struct mvs_chip_info {
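[Editor's note: later in this patch, mvs_slot_err() reads the 8-byte error info record as two little-endian 32-bit words — the first dword carries the transport errors of error_info_rec, the second the slot/protection errors of error_info_rec_2. A minimal user-space model of that split (byte assembly written out by hand here; the driver uses le32_to_cpu()):

	#include <stdint.h>
	#include <stdio.h>

	#define TFILE_ERR	(1u << 24)	/* dw0: SATA taskfile Error bit set */
	#define SLOT_BSY_ERR	(1u << 31)	/* dw1: Slot Busy Error */

	/* illustrative: split the 8-byte record the way mvs_slot_err() does */
	static void decode_err_info(const uint8_t r[8])
	{
		uint32_t dw0 = r[0] | r[1] << 8 | r[2] << 16 | (uint32_t)r[3] << 24;
		uint32_t dw1 = r[4] | r[5] << 8 | r[6] << 16 | (uint32_t)r[7] << 24;

		if (dw1 & SLOT_BSY_ERR)
			puts("slot busy -> requeue as SAS_QUEUE_FULL");
		if (dw0 & TFILE_ERR)
			puts("taskfile error -> return D2H FIS via mvs_sata_done()");
	}

	int main(void)
	{
		/* TFILE_ERR set in dw0, SLOT_BSY_ERR set in dw1 */
		uint8_t rec[8] = { 0, 0, 0, 0x01, 0, 0, 0, 0x80 };
		decode_err_info(rec);
		return 0;
	}
]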
@@ -543,28 +595,12 @@ struct mvs_cmd_hdr {
 	__le32 reserved[4];
 };
 
-struct mvs_slot_info {
-	struct sas_task *task;
-	u32 n_elem;
-	u32 tx;
-
-	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
-	 * and PRD table
-	 */
-	void *buf;
-	dma_addr_t buf_dma;
-#if _MV_DUMP
-	u32 cmd_size;
-#endif
-
-	void *response;
-};
-
 struct mvs_port {
 	struct asd_sas_port sas_port;
 	u8 port_attached;
 	u8 taskfileset;
 	u8 wide_port_phymap;
+	struct list_head list;
 };
 
 struct mvs_phy {
@@ -582,6 +618,27 @@ struct mvs_phy {
 	u32 frame_rcvd_size;
 	u8 frame_rcvd[32];
 	u8 phy_attached;
+	enum sas_linkrate	minimum_linkrate;
+	enum sas_linkrate	maximum_linkrate;
+};
+
+struct mvs_slot_info {
+	struct list_head list;
+	struct sas_task *task;
+	u32 n_elem;
+	u32 tx;
+
+	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+	 * and PRD table
+	 */
+	void *buf;
+	dma_addr_t buf_dma;
+#if _MV_DUMP
+	u32 cmd_size;
+#endif
+
+	void *response;
+	struct mvs_port *port;
 };
 
 struct mvs_info {
@@ -612,21 +669,14 @@ struct mvs_info {
 
 	const struct mvs_chip_info *chip;
 
-	unsigned long tags[MVS_SLOTS];
+	u8 tags[MVS_SLOTS];
 	struct mvs_slot_info slot_info[MVS_SLOTS];
 	/* further per-slot information */
 	struct mvs_phy phy[MVS_MAX_PHYS];
 	struct mvs_port port[MVS_MAX_PHYS];
-
-	u32 can_queue;		/* per adapter */
-	u32 tag_out;		/*Get*/
-	u32 tag_in;		/*Give*/
-};
-
-struct mvs_queue_task {
-	struct list_head list;
-
-	void *uldd_task;
+#ifdef MVS_USE_TASKLET
+	struct tasklet_struct tasklet;
+#endif
 };
 
 static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
@@ -641,10 +691,11 @@ static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
 static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
 static void mvs_detect_porttype(struct mvs_info *mvi, int i);
 static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+static void mvs_release_task(struct mvs_info *mvi, int phy_no);
 
 static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
 static void mvs_scan_start(struct Scsi_Host *);
-static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);
+static int mvs_slave_configure(struct scsi_device *sdev);
 
 static struct scsi_transport_template *mvs_stt;
 
@@ -659,7 +710,7 @@ static struct scsi_host_template mvs_sht = {
 	.name			= DRV_NAME,
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,
-	.slave_configure	= sas_slave_configure,
+	.slave_configure	= mvs_slave_configure,
 	.slave_destroy		= sas_slave_destroy,
 	.scan_finished		= mvs_scan_finished,
 	.scan_start		= mvs_scan_start,
@@ -674,7 +725,7 @@ static struct scsi_host_template mvs_sht = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= mvs_sas_slave_alloc,
+	.slave_alloc		= sas_slave_alloc,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 };
@@ -709,10 +760,10 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
 	printk("\n");
 }
 
+#if _MV_DUMP
 static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
 			   enum sas_protocol proto)
 {
-#if _MV_DUMP
 	u32 offset;
 	struct pci_dev *pdev = mvi->pdev;
 	struct mvs_slot_info *slot = &mvi->slot_info[tag];
@@ -723,14 +774,14 @@ static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
 		tag);
 	mvs_hexdump(32, (u8 *) slot->response,
 		    (u32) slot->buf_dma + offset);
-#endif
 }
+#endif
 
 static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 				enum sas_protocol proto)
 {
 #if _MV_DUMP
-	u32 sz, w_ptr, r_ptr;
+	u32 sz, w_ptr;
 	u64 addr;
 	void __iomem *regs = mvi->regs;
 	struct pci_dev *pdev = mvi->pdev;
@@ -738,12 +789,10 @@ static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 
 	/*Delivery Queue */
 	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
-	w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
-	r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
+	w_ptr = slot->tx;
 	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
 	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
-		sz, w_ptr, r_ptr);
+		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Delivery Queue Base Address=0x%llX (PA)"
 		"(tx_dma=0x%llX), Entry=%04d\n",
@@ -751,11 +800,11 @@ static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
 		    (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
 	/*Command List */
-	addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
+	addr = mvi->slot_dma;
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Command List Base Address=0x%llX (PA)"
 		"(slot_dma=0x%llX), Header=%03d\n",
-		addr, mvi->slot_dma, tag);
+		addr, slot->buf_dma, tag);
 	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
 	/*mvs_cmd_hdr */
 	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
@@ -779,7 +828,7 @@ static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 
 static void mvs_hba_cq_dump(struct mvs_info *mvi)
 {
-#if _MV_DUMP
+#if (_MV_DUMP > 2)
 	u64 addr;
 	void __iomem *regs = mvi->regs;
 	struct pci_dev *pdev = mvi->pdev;
@@ -788,8 +837,8 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi)
 
 	/*Completion Queue */
 	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
-	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
-		(u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
+		mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Completion List Base Address=0x%llX (PA), "
 		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
@@ -854,34 +903,53 @@ static int pci_go_64(struct pci_dev *pdev)
 	return rc;
 }
 
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+	if (task->lldd_task) {
+		struct mvs_slot_info *slot;
+		slot = (struct mvs_slot_info *) task->lldd_task;
+		*tag = slot - mvi->slot_info;
+		return 1;
+	}
+	return 0;
+}
+
 static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
 {
-	mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
-	mvi->tags[mvi->tag_in] = tag;
+	void *bitmap = (void *) &mvi->tags;
+	clear_bit(tag, bitmap);
 }
 
 static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
 {
-	mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
+	mvs_tag_clear(mvi, tag);
+}
+
+static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+	void *bitmap = (void *) &mvi->tags;
+	set_bit(tag, bitmap);
 }
 
 static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
 {
-	if (mvi->tag_out != mvi->tag_in) {
-		*tag_out = mvi->tags[mvi->tag_out];
-		mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
-		return 0;
-	}
-	return -EBUSY;
+	unsigned int index, tag;
+	void *bitmap = (void *) &mvi->tags;
+
+	index = find_first_zero_bit(bitmap, MVS_SLOTS);
+	tag = index;
+	if (tag >= MVS_SLOTS)
+		return -SAS_QUEUE_FULL;
+	mvs_tag_set(mvi, tag);
+	*tag_out = tag;
+	return 0;
 }
 
 static void mvs_tag_init(struct mvs_info *mvi)
 {
 	int i;
 	for (i = 0; i < MVS_SLOTS; ++i)
-		mvi->tags[i] = i;
-	mvi->tag_out = 0;
-	mvi->tag_in = MVS_SLOTS - 1;
+		mvs_tag_clear(mvi, i);
 }
 
 #ifndef MVS_DISABLE_NVRAM
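[Editor's note: the hunk above replaces the old FIFO free-list (tag_in/tag_out ring) with a bitmap over mvi->tags: find_first_zero_bit() picks the lowest free slot, set_bit() claims it, clear_bit() returns it, so tags can be freed in any order. A small user-space model of the same scheme — the bit helpers below are stand-ins written for illustration, not the kernel APIs:

	#include <stdint.h>
	#include <stdio.h>

	#define MVS_SLOTS 64			/* illustrative slot count */

	static uint8_t tags[MVS_SLOTS / 8];	/* one bit per hardware slot */

	static unsigned find_first_zero(void)	/* ~ find_first_zero_bit() */
	{
		for (unsigned i = 0; i < MVS_SLOTS; i++)
			if (!(tags[i / 8] & (1u << (i % 8))))
				return i;
		return MVS_SLOTS;		/* all slots busy */
	}

	static int tag_alloc(unsigned *tag_out)
	{
		unsigned tag = find_first_zero();
		if (tag >= MVS_SLOTS)
			return -1;		/* maps to -SAS_QUEUE_FULL */
		tags[tag / 8] |= 1u << (tag % 8);	/* ~ set_bit() */
		*tag_out = tag;
		return 0;
	}

	static void tag_clear(unsigned tag)
	{
		tags[tag / 8] &= ~(1u << (tag % 8));	/* ~ clear_bit() */
	}

	int main(void)
	{
		unsigned a, b;
		tag_alloc(&a);		/* a == 0 */
		tag_alloc(&b);		/* b == 1 */
		tag_clear(a);		/* slot 0 free again */
		tag_alloc(&a);		/* lowest free bit: a == 0 again */
		printf("a=%u b=%u\n", a, b);
		return 0;
	}

A FIFO free-list implicitly assumes frees arrive in an order compatible with its ring pointers; the bitmap carries no such ordering assumption, which is what lets the out-of-order abort/release paths added later in this patch free tags safely.]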
@@ -1013,10 +1081,21 @@ err_out:
 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
 {
 	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
 
 	if (!phy->phy_attached)
 		return;
 
+	if (sas_phy->phy) {
+		struct sas_phy *sphy = sas_phy->phy;
+
+		sphy->negotiated_linkrate = sas_phy->linkrate;
+		sphy->minimum_linkrate = phy->minimum_linkrate;
+		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+	}
+
 	if (phy->phy_type & PORT_TYPE_SAS) {
 		struct sas_identify_frame *id;
 
@@ -1053,80 +1132,149 @@ static void mvs_scan_start(struct Scsi_Host *shost)
 	}
 }
 
-static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
+static int mvs_slave_configure(struct scsi_device *sdev)
 {
-	int rc;
+	struct domain_device *dev = sdev_to_domain_dev(sdev);
+	int ret = sas_slave_configure(sdev);
 
-	rc = sas_slave_alloc(scsi_dev);
+	if (ret)
+		return ret;
 
-	return rc;
+	if (dev_is_sata(dev)) {
+		/* struct ata_port *ap = dev->sata_dev.ap; */
+		/* struct ata_device *adev = ap->link.device; */
+
+		/* clamp at no NCQ for the time being */
+		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+	}
+	return 0;
 }
 
-static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
+static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
 {
 	struct pci_dev *pdev = mvi->pdev;
 	struct sas_ha_struct *sas_ha = &mvi->sas;
-	struct mvs_phy *phy = &mvi->phy[port_no];
+	struct mvs_phy *phy = &mvi->phy[phy_no];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 
-	phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
+	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
 	/*
 	 * events is port event now ,
 	 * we need check the interrupt status which belongs to per port.
 	 */
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Port %d Event = %X\n",
-		port_no, phy->irq_status);
+		phy_no, phy->irq_status);
 
 	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
-		if (!mvs_is_phy_ready(mvi, port_no)) {
+		mvs_release_task(mvi, phy_no);
+		if (!mvs_is_phy_ready(mvi, phy_no)) {
 			sas_phy_disconnected(sas_phy);
 			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+			dev_printk(KERN_INFO, &pdev->dev,
+				"Port %d Unplug Notice\n", phy_no);
+
 		} else
 			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
 	}
 	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
 		if (phy->irq_status & PHYEV_COMWAKE) {
-			u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
-			mvs_write_port_irq_mask(mvi, port_no,
+			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
+			mvs_write_port_irq_mask(mvi, phy_no,
 						tmp | PHYEV_SIG_FIS);
 		}
 		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
-			phy->phy_status = mvs_is_phy_ready(mvi, port_no);
+			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
 			if (phy->phy_status) {
-				mvs_detect_porttype(mvi, port_no);
+				mvs_detect_porttype(mvi, phy_no);
 
 				if (phy->phy_type & PORT_TYPE_SATA) {
 					u32 tmp = mvs_read_port_irq_mask(mvi,
-								port_no);
+								phy_no);
 					tmp &= ~PHYEV_SIG_FIS;
 					mvs_write_port_irq_mask(mvi,
-								port_no, tmp);
+								phy_no, tmp);
 				}
 
-				mvs_update_phyinfo(mvi, port_no, 0);
+				mvs_update_phyinfo(mvi, phy_no, 0);
 				sas_ha->notify_phy_event(sas_phy,
 							PHYE_OOB_DONE);
-				mvs_bytes_dmaed(mvi, port_no);
+				mvs_bytes_dmaed(mvi, phy_no);
 			} else {
 				dev_printk(KERN_DEBUG, &pdev->dev,
 					"plugin interrupt but phy is gone\n");
 				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
 							NULL);
 			}
-		} else if (phy->irq_status & PHYEV_BROAD_CH)
+		} else if (phy->irq_status & PHYEV_BROAD_CH) {
+			mvs_release_task(mvi, phy_no);
 			sas_ha->notify_port_event(sas_phy,
 						PORTE_BROADCAST_RCVD);
+		}
 	}
-	mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
+	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
 }
 
 static void mvs_int_sata(struct mvs_info *mvi)
 {
-	/* FIXME */
+	u32 tmp;
+	void __iomem *regs = mvi->regs;
+	tmp = mr32(INT_STAT_SRS);
+	mw32(INT_STAT_SRS, tmp & 0xFFFF);
+}
+
+static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
+				u32 slot_idx)
+{
+	void __iomem *regs = mvi->regs;
+	struct domain_device *dev = task->dev;
+	struct asd_sas_port *sas_port = dev->port;
+	struct mvs_port *port = mvi->slot_info[slot_idx].port;
+	u32 reg_set, phy_mask;
+
+	if (!sas_protocol_ata(task->task_proto)) {
+		reg_set = 0;
+		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
+				sas_port->phy_mask;
+	} else {
+		reg_set = port->taskfileset;
+		phy_mask = sas_port->phy_mask;
+	}
+	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
+					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
+					(phy_mask << TXQ_PHY_SHIFT) |
+					(reg_set << TXQ_SRS_SHIFT));
+
+	mw32(TX_PROD_IDX, mvi->tx_prod);
+	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+}
+
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+			u32 slot_idx, int err)
+{
+	struct mvs_port *port = mvi->slot_info[slot_idx].port;
+	struct task_status_struct *tstat = &task->task_status;
+	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+	int stat = SAM_GOOD;
+
+	resp->frame_len = sizeof(struct dev_to_host_fis);
+	memcpy(&resp->ending_fis[0],
+	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
+	       sizeof(struct dev_to_host_fis));
+	tstat->buf_valid_size = sizeof(*resp);
+	if (unlikely(err))
+		stat = SAS_PROTO_RESPONSE;
+	return stat;
 }
 
-static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
 			  struct mvs_slot_info *slot, u32 slot_idx)
 {
 	if (!sas_protocol_ata(task->task_proto))
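[Editor's note: mvs_sata_done(), added above, centralizes what the completion handler used to do inline — copy the device-to-host FIS the chip DMA'd into the per-port receive area (SATA_RECEIVED_D2H_FIS) into libsas' ata_task_resp. A stripped-down model of that hand-off, with struct layouts simplified for illustration:

	#include <string.h>

	struct d2h_fis { unsigned char byte[20]; };	/* stand-in for dev_to_host_fis */
	struct ata_resp { unsigned frame_len; unsigned char ending_fis[24]; };

	/* illustrative: return the received D2H FIS to the midlayer, flagging
	 * a protocol response when the slot completed with a taskfile error */
	static int sata_done_model(struct ata_resp *resp,
				   const struct d2h_fis *rx, int err)
	{
		resp->frame_len = sizeof(*rx);
		memcpy(resp->ending_fis, rx, sizeof(*rx));
		return err ? 1 : 0;	/* 1 ~ SAS_PROTO_RESPONSE, 0 ~ SAM_GOOD */
	}
]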
@@ -1149,38 +1297,58 @@ static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
 		/* do nothing */
 		break;
 	}
-
+	list_del(&slot->list);
+	task->lldd_task = NULL;
 	slot->task = NULL;
-	mvs_tag_clear(mvi, slot_idx);
+	slot->port = NULL;
 }
 
-static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
 			u32 slot_idx)
 {
 	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-	u64 err_dw0 = *(u32 *) slot->response;
-	void __iomem *regs = mvi->regs;
-	u32 tmp;
+	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
+	int stat = SAM_CHECK_COND;
 
-	if (err_dw0 & CMD_ISS_STPD)
-		if (sas_protocol_ata(task->task_proto)) {
-			tmp = mr32(INT_STAT_SRS);
-			mw32(INT_STAT_SRS, tmp & 0xFFFF);
-		}
+	if (err_dw1 & SLOT_BSY_ERR) {
+		stat = SAS_QUEUE_FULL;
+		mvs_slot_reset(mvi, task, slot_idx);
+	}
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		break;
+	case SAS_PROTOCOL_SMP:
+		break;
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+		if (err_dw0 & TFILE_ERR)
+			stat = mvs_sata_done(mvi, task, slot_idx, 1);
+		break;
+	default:
+		break;
+	}
 
-	mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
+	mvs_hexdump(16, (u8 *) slot->response, 0);
+	return stat;
 }
 
-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
+static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
 {
 	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
 	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
 	struct sas_task *task = slot->task;
-	struct task_status_struct *tstat = &task->task_status;
-	struct mvs_port *port = &mvi->port[task->dev->port->id];
+	struct task_status_struct *tstat;
+	struct mvs_port *port;
 	bool aborted;
 	void *to;
 
+	if (unlikely(!task || !task->lldd_task))
+		return -1;
+
+	mvs_hba_cq_dump(mvi);
+
 	spin_lock(&task->task_state_lock);
 	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
 	if (!aborted) {
@@ -1190,22 +1358,27 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
 	}
 	spin_unlock(&task->task_state_lock);
 
-	if (aborted)
+	if (aborted) {
+		mvs_slot_task_free(mvi, task, slot, slot_idx);
+		mvs_slot_free(mvi, rx_desc);
 		return -1;
+	}
 
+	port = slot->port;
+	tstat = &task->task_status;
 	memset(tstat, 0, sizeof(*tstat));
 	tstat->resp = SAS_TASK_COMPLETE;
 
-
-	if (unlikely(!port->port_attached)) {
-		tstat->stat = SAS_PHY_DOWN;
+	if (unlikely(!port->port_attached || flags)) {
+		mvs_slot_err(mvi, task, slot_idx);
+		if (!sas_protocol_ata(task->task_proto))
+			tstat->stat = SAS_PHY_DOWN;
 		goto out;
 	}
 
 	/* error info record present */
-	if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
-		tstat->stat = SAM_CHECK_COND;
-		mvs_slot_err(mvi, task, slot_idx);
+	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
 		goto out;
 	}
 
@@ -1242,21 +1415,7 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
 	case SAS_PROTOCOL_SATA:
 	case SAS_PROTOCOL_STP:
 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
-		struct ata_task_resp *resp =
-			(struct ata_task_resp *)tstat->buf;
-
-		if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
-		    RXQ_DONE)
-			tstat->stat = SAM_GOOD;
-		else
-			tstat->stat = SAM_CHECK_COND;
-
-		resp->frame_len = sizeof(struct dev_to_host_fis);
-		memcpy(&resp->ending_fis[0],
-		       SATA_RECEIVED_D2H_FIS(port->taskfileset),
-		       sizeof(struct dev_to_host_fis));
-		if (resp->ending_fis[2] & ATA_ERR)
-			mvs_hexdump(16, resp->ending_fis, 0);
+		tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
 		break;
 	}
 
@@ -1266,11 +1425,34 @@ static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
 	}
 
 out:
-	mvs_slot_free(mvi, task, slot, slot_idx);
+	mvs_slot_task_free(mvi, task, slot, slot_idx);
+	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
+		mvs_slot_free(mvi, rx_desc);
+
+	spin_unlock(&mvi->lock);
 	task->task_done(task);
+	spin_lock(&mvi->lock);
 	return tstat->stat;
 }
 
+static void mvs_release_task(struct mvs_info *mvi, int phy_no)
+{
+	struct list_head *pos, *n;
+	struct mvs_slot_info *slot;
+	struct mvs_phy *phy = &mvi->phy[phy_no];
+	struct mvs_port *port = phy->port;
+	u32 rx_desc;
+
+	if (!port)
+		return;
+
+	list_for_each_safe(pos, n, &port->list) {
+		slot = container_of(pos, struct mvs_slot_info, list);
+		rx_desc = (u32) (slot - mvi->slot_info);
+		mvs_slot_complete(mvi, rx_desc, 1);
+	}
+}
+
 static void mvs_int_full(struct mvs_info *mvi)
 {
 	void __iomem *regs = mvi->regs;
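[Editor's note: mvs_release_task() is why slots are now threaded onto their port's list — on a phy loss or broadcast change, everything still outstanding on that port can be force-completed. The _safe iterator matters because mvs_slot_complete() unlinks the entry being visited. A minimal model of that pattern, with a singly linked list standing in for list_head:

	/* illustrative: walk a list while the callback unlinks the current
	 * node -- capture the next pointer first, as list_for_each_safe does */
	struct node { struct node *next; int tag; };

	static void release_all(struct node **head, void (*complete)(int tag))
	{
		struct node *cur = *head, *next;

		while (cur) {
			next = cur->next;  /* grab before complete() unlinks */
			complete(cur->tag);
			cur = next;
		}
		*head = 0;
	}
]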
@@ -1305,40 +1487,43 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
 	 * we don't have to stall the CPU reading that register.
 	 * The actual RX ring is offset by one dword, due to this.
 	 */
-	rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
-	if (rx_prod_idx == 0xfff) {	/* h/w hasn't touched RX ring yet */
-		mvi->rx_cons = 0xfff;
+	rx_prod_idx = mvi->rx_cons;
+	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
 		return 0;
-	}
 
 	/* The CMPL_Q may come late, read from register and try again
 	 * note: if coalescing is enabled,
 	 * it will need to read from register every time for sure
 	 */
 	if (mvi->rx_cons == rx_prod_idx)
-		return 0;
+		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
 
-	if (mvi->rx_cons == 0xfff)
-		mvi->rx_cons = MVS_RX_RING_SZ - 1;
+	if (mvi->rx_cons == rx_prod_idx)
+		return 0;
 
 	while (mvi->rx_cons != rx_prod_idx) {
 
 		/* increment our internal RX consumer pointer */
-		mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
-
-		rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
+		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
 
-		mvs_hba_cq_dump(mvi);
+		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
 
 		if (likely(rx_desc & RXQ_DONE))
-			mvs_slot_complete(mvi, rx_desc);
+			mvs_slot_complete(mvi, rx_desc, 0);
 		if (rx_desc & RXQ_ATTN) {
 			attn = true;
 			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
 				rx_desc);
 		} else if (rx_desc & RXQ_ERR) {
+			if (!(rx_desc & RXQ_DONE))
+				mvs_slot_complete(mvi, rx_desc, 0);
 			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
 				rx_desc);
+		} else if (rx_desc & RXQ_SLOT_RESET) {
+			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
+				rx_desc);
+			mvs_slot_free(mvi, rx_desc);
 		}
 	}
 
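[Editor's note: this rework leans on the ring's extra leading dword — the controller DMA-writes its producer index into mvi->rx[0] (which is also why the ring is now allocated as MVS_RX_RING_SZ + 1 entries, see below), so the handler normally avoids an MMIO read and only re-reads RX_CONS_IDX when the shadow value suggests nothing new arrived. A simplified model of that probe; the 0xfff sentinel follows the driver, the register callback is a stand-in, and the index bookkeeping is condensed for illustration:

	#include <stdint.h>

	#define RX_RING_SZ	64	/* illustrative size, power of two */

	/* returns how many descriptors are ready to consume */
	static unsigned rx_ready(const volatile uint32_t *rx_ring, uint32_t cons,
				 uint32_t (*read_rx_cons_reg)(void))
	{
		uint32_t prod = rx_ring[0] & (RX_RING_SZ - 1); /* shadow, DMA'd by HW */

		if (rx_ring[0] == 0xfff)	/* sentinel: HW hasn't written yet */
			return 0;
		if (prod == cons)		/* maybe stale (IRQ coalescing) */
			prod = read_rx_cons_reg() & (RX_RING_SZ - 1);
		return (prod - cons) & (RX_RING_SZ - 1);
	}
]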
@@ -1348,6 +1533,23 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
 	return 0;
 }
 
+#ifdef MVS_USE_TASKLET
+static void mvs_tasklet(unsigned long data)
+{
+	struct mvs_info *mvi = (struct mvs_info *) data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mvi->lock, flags);
+
+#ifdef MVS_DISABLE_MSI
+	mvs_int_full(mvi);
+#else
+	mvs_int_rx(mvi, true);
+#endif
+	spin_unlock_irqrestore(&mvi->lock, flags);
+}
+#endif
+
 static irqreturn_t mvs_interrupt(int irq, void *opaque)
 {
 	struct mvs_info *mvi = opaque;
@@ -1356,18 +1558,21 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
 
 	stat = mr32(GBL_INT_STAT);
 
-	/* clear CMD_CMPLT ASAP */
-	mw32_f(INT_STAT, CINT_DONE);
-
 	if (stat == 0 || stat == 0xffffffff)
 		return IRQ_NONE;
 
+	/* clear CMD_CMPLT ASAP */
+	mw32_f(INT_STAT, CINT_DONE);
+
+#ifndef MVS_USE_TASKLET
 	spin_lock(&mvi->lock);
 
 	mvs_int_full(mvi);
 
 	spin_unlock(&mvi->lock);
-
+#else
+	tasklet_schedule(&mvi->tasklet);
+#endif
 	return IRQ_HANDLED;
 }
 
@@ -1376,12 +1581,15 @@ static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
 {
 	struct mvs_info *mvi = opaque;
 
+#ifndef MVS_USE_TASKLET
 	spin_lock(&mvi->lock);
 
 	mvs_int_rx(mvi, true);
 
 	spin_unlock(&mvi->lock);
-
+#else
+	tasklet_schedule(&mvi->tasklet);
+#endif
 	return IRQ_HANDLED;
 }
 #endif
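[Editor's note: with MVS_USE_TASKLET defined, both handlers share one shape — acknowledge in hard-IRQ context, defer the completion-queue walk to softirq. A skeleton of that pattern using the kernel tasklet API (trimmed; the names are illustrative, not the driver's, and it assumes tasklet_init(&hba->tasklet, demo_tasklet_fn, (unsigned long)hba) was called at probe time, as this patch does in mvs_alloc()):

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct demo_hba {
		spinlock_t lock;
		struct tasklet_struct tasklet;
	};

	static void demo_tasklet_fn(unsigned long data)
	{
		struct demo_hba *hba = (struct demo_hba *)data;
		unsigned long flags;

		spin_lock_irqsave(&hba->lock, flags);
		/* heavy completion-queue walk runs here, in softirq context */
		spin_unlock_irqrestore(&hba->lock, flags);
	}

	static irqreturn_t demo_irq(int irq, void *opaque)
	{
		struct demo_hba *hba = opaque;

		/* ack the chip first (cheap MMIO write), then defer the work */
		tasklet_schedule(&hba->tasklet);
		return IRQ_HANDLED;
	}
]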
@@ -1576,15 +1784,19 @@ static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
 	return MVS_ID_NOT_MAPPED;
 }
 
-static u32 mvs_get_ncq_tag(struct sas_task *task)
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
 {
-	u32 tag = 0;
 	struct ata_queued_cmd *qc = task->uldd_task;
 
-	if (qc)
-		tag = qc->tag;
+	if (qc) {
+		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+		    qc->tf.command == ATA_CMD_FPDMA_READ) {
+			*tag = qc->tag;
+			return 1;
+		}
+	}
 
-	return tag;
+	return 0;
 }
 
 static int mvs_task_prep_ata(struct mvs_info *mvi,
@@ -1628,11 +1840,9 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
 	hdr->flags = cpu_to_le32(flags);
 
 	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
-	if (task->ata_task.use_ncq) {
-		hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
-		/*Fill in task file */
-		task->ata_task.fis.sector_count = hdr->tags << 3;
-	} else
+	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
+		task->ata_task.fis.sector_count |= hdr->tags << 3;
+	else
 		hdr->tags = cpu_to_le32(tag);
 	hdr->data_len = cpu_to_le32(task->total_xfer_len);
 
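[Editor's note: for NCQ the queue tag has to land in bits 7:3 of the FIS sector count field; the change above also makes mvs_get_ncq_tag() report whether a tag applies at all (only FPDMA reads/writes carry one) and ORs the tag in rather than overwriting the field. A two-line worked example of the placement:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t sector_count = 0;	/* FIS field carrying the NCQ tag */
		uint32_t tag = 5;		/* from the ata_queued_cmd */

		sector_count |= tag << 3;	/* tag occupies bits 7:3 */
		assert(sector_count == 0x28);	/* 5 << 3 */
		return 0;
	}
]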
@@ -1725,13 +1935,16 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
 	u32 flags;
 	u32 resp_len, req_len, i, tag = tei->tag;
 	const u32 max_resp_len = SB_RFB_MAX;
+	u8 phy_mask;
 
 	slot = &mvi->slot_info[tag];
 
+	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
+		task->dev->port->phy_mask;
 	slot->tx = mvi->tx_prod;
 	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
 				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
-				(port->wide_port_phymap << TXQ_PHY_SHIFT));
+				(phy_mask << TXQ_PHY_SHIFT));
 
 	flags = MCH_RETRY;
 	if (task->ssp_task.enable_first_burst) {
@@ -1832,22 +2045,32 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
 	void __iomem *regs = mvi->regs;
 	struct mvs_task_exec_info tei;
 	struct sas_task *t = task;
+	struct mvs_slot_info *slot;
 	u32 tag = 0xdeadbeef, rc, n_elem = 0;
 	unsigned long flags;
 	u32 n = num, pass = 0;
 
 	spin_lock_irqsave(&mvi->lock, flags);
-
 	do {
+		dev = t->dev;
 		tei.port = &mvi->port[dev->port->id];
 
 		if (!tei.port->port_attached) {
-			struct task_status_struct *ts = &t->task_status;
-			ts->stat = SAS_PHY_DOWN;
-			t->task_done(t);
-			rc = 0;
-			goto exec_exit;
+			if (sas_protocol_ata(t->task_proto)) {
+				rc = SAS_PHY_DOWN;
+				goto out_done;
+			} else {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+				t->task_done(t);
+				if (n > 1)
+					t = list_entry(t->list.next,
+							struct sas_task, list);
+				continue;
+			}
 		}
+
 		if (!sas_protocol_ata(t->task_proto)) {
 			if (t->num_scatter) {
 				n_elem = pci_map_sg(mvi->pdev, t->scatter,
@@ -1866,9 +2089,10 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
 		if (rc)
 			goto err_out;
 
-		mvi->slot_info[tag].task = t;
-		mvi->slot_info[tag].n_elem = n_elem;
-		memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
+		slot = &mvi->slot_info[tag];
+		t->lldd_task = NULL;
+		slot->n_elem = n_elem;
+		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
 		tei.task = t;
 		tei.hdr = &mvi->slot[tag];
 		tei.tag = tag;
@@ -1897,28 +2121,26 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
 		if (rc)
 			goto err_out_tag;
 
+		slot->task = t;
+		slot->port = tei.port;
+		t->lldd_task = (void *) slot;
+		list_add_tail(&slot->list, &slot->port->list);
 		/* TODO: select normal or high priority */
 
 		spin_lock(&t->task_state_lock);
 		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
 		spin_unlock(&t->task_state_lock);
 
-		if (n == 1) {
-			spin_unlock_irqrestore(&mvi->lock, flags);
-			mw32(TX_PROD_IDX, mvi->tx_prod);
-		}
 		mvs_hba_memory_dump(mvi, tag, t->task_proto);
 
 		++pass;
 		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-
-		if (n == 1)
-			break;
-
-		t = list_entry(t->list.next, struct sas_task, list);
+		if (n > 1)
+			t = list_entry(t->list.next, struct sas_task, list);
 	} while (--n);
 
-	return 0;
+	rc = 0;
+	goto out_done;
 
 err_out_tag:
 	mvs_tag_free(mvi, tag);
@@ -1928,7 +2150,7 @@ err_out:
 	if (n_elem)
 		pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
 			     t->data_dir);
-exec_exit:
+out_done:
 	if (pass)
 		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
 	spin_unlock_irqrestore(&mvi->lock, flags);
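[Editor's note: with the early doorbell write removed from the loop, TX_PROD_IDX is now written at most once per call, on the shared exit path, and only if anything was queued (pass != 0); the value published is the index of the last entry filled. A compact, self-contained model of that batching:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_SZ 64

	struct ring { uint32_t entries[RING_SZ]; unsigned prod; };

	static void write_doorbell(unsigned idx)	/* ~ mw32(TX_PROD_IDX, ...) */
	{
		printf("TX_PROD_IDX <- %u\n", idx);
	}

	/* fill any number of delivery-queue entries, ring the doorbell once */
	static void queue_batch(struct ring *r, const uint32_t *cmds, unsigned n)
	{
		unsigned pass = 0;

		while (n--) {
			r->entries[r->prod] = *cmds++;
			r->prod = (r->prod + 1) & (RING_SZ - 1);
			pass++;
		}
		if (pass)	/* one MMIO write publishes the whole batch */
			write_doorbell((r->prod - 1) & (RING_SZ - 1));
	}

	int main(void)
	{
		struct ring r = { .prod = 0 };
		uint32_t cmds[3] = { 1, 2, 3 };
		queue_batch(&r, cmds, 3);	/* prints TX_PROD_IDX <- 2 */
		return 0;
	}
]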
@@ -1937,42 +2159,59 @@ exec_exit:
 
 static int mvs_task_abort(struct sas_task *task)
 {
-	int rc = 1;
+	int rc;
 	unsigned long flags;
 	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
 	struct pci_dev *pdev = mvi->pdev;
+	int tag;
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 		rc = TMF_RESP_FUNC_COMPLETE;
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		goto out_done;
 	}
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-	/*FIXME*/
-	rc = TMF_RESP_FUNC_COMPLETE;
-
 	switch (task->task_proto) {
 	case SAS_PROTOCOL_SMP:
-		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
+		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
 		break;
 	case SAS_PROTOCOL_SSP:
-		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
+		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
 		break;
 	case SAS_PROTOCOL_SATA:
 	case SAS_PROTOCOL_STP:
 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
-		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
-			"Dump D2H FIS: \n");
+		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
+#if _MV_DUMP
+		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
 		mvs_hexdump(sizeof(struct host_to_dev_fis),
 				(void *)&task->ata_task.fis, 0);
 		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
 		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
+#endif
+		spin_lock_irqsave(&task->task_state_lock, flags);
+		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+			/* TODO */
+			;
+		}
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		break;
 	}
 	default:
 		break;
 	}
+
+	if (mvs_find_tag(mvi, task, &tag)) {
+		spin_lock_irqsave(&mvi->lock, flags);
+		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
+		spin_unlock_irqrestore(&mvi->lock, flags);
+	}
+	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
+		rc = TMF_RESP_FUNC_COMPLETE;
+	else
+		rc = TMF_RESP_FUNC_FAILED;
 out_done:
 	return rc;
 }
@@ -2001,7 +2240,7 @@ static void mvs_free(struct mvs_info *mvi)
 			  mvi->rx_fis, mvi->rx_fis_dma);
 	if (mvi->rx)
 		dma_free_coherent(&mvi->pdev->dev,
-				  sizeof(*mvi->rx) * MVS_RX_RING_SZ,
+				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
 				  mvi->rx, mvi->rx_dma);
 	if (mvi->slot)
 		dma_free_coherent(&mvi->pdev->dev,
@@ -2109,6 +2348,9 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 		return NULL;
 
 	spin_lock_init(&mvi->lock);
+#ifdef MVS_USE_TASKLET
+	tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
+#endif
 	mvi->pdev = pdev;
 	mvi->chip = chip;
 
@@ -2132,6 +2374,10 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 		mvs_phy_init(mvi, i);
 		arr_phy[i] = &mvi->phy[i].sas_phy;
 		arr_port[i] = &mvi->port[i].sas_port;
+		mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
+		mvi->port[i].wide_port_phymap = 0;
+		mvi->port[i].port_attached = 0;
+		INIT_LIST_HEAD(&mvi->port[i].list);
 	}
 
 	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
@@ -2148,9 +2394,10 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 	mvi->sas.sas_phy = arr_phy;
 	mvi->sas.sas_port = arr_port;
 	mvi->sas.num_phys = chip->n_phy;
-	mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
+	mvi->sas.lldd_max_execute_num = 1;
 	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
-	mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
+	mvi->shost->can_queue = MVS_CAN_QUEUE;
+	mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
 	mvi->sas.lldd_ha = mvi;
 	mvi->sas.core.shost = mvi->shost;
 
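[Editor's note: the queue-depth plumbing moves to the standard Scsi_Host fields — can_queue caps outstanding commands per adapter at one less than the slot count (MVS_CAN_QUEUE above), and cmd_per_lun spreads slots across phys, while lldd_max_execute_num drops to 1 so libsas submits one task per call. The arithmetic, assuming for illustration a slot count of 64 on a 4-phy part (the value is hypothetical, not the driver's constant):

	#include <stdio.h>

	#define MVS_SLOTS 64	/* illustrative value only */

	int main(void)
	{
		int n_phy = 4;

		printf("can_queue   = %d\n", MVS_SLOTS - 1);	/* 63 */
		printf("cmd_per_lun = %d\n", MVS_SLOTS / n_phy);/* 16 */
		return 0;
	}
]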
@@ -2203,11 +2450,11 @@ static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
 	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
 
 	mvi->rx = dma_alloc_coherent(&pdev->dev,
-				     sizeof(*mvi->rx) * MVS_RX_RING_SZ,
+				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
 				     &mvi->rx_dma, GFP_KERNEL);
 	if (!mvi->rx)
 		goto err_out;
-	memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
+	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
 
 	mvi->rx[0] = cpu_to_le32(0xfff);
 	mvi->rx_cons = 0xfff;
@@ -2357,7 +2604,7 @@ static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
 	mvs_cw32(regs, CMD_SAS_CTL0, tmp);
 
 	/* workaround for WDTIMEOUT , set to 550 ms */
-	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);
+	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
 
 	/* not to halt for different port op during wideport link change */
 	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
@@ -2465,17 +2712,16 @@ static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
 {
 	u32 tmp;
 	struct mvs_phy *phy = &mvi->phy[i];
-	struct mvs_port *port;
+	struct mvs_port *port = phy->port;;
 
 	tmp = mvs_read_phy_ctl(mvi, i);
 
 	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
-		if (!phy->port)
+		if (!port)
 			phy->phy_attached = 1;
 		return tmp;
 	}
 
-	port = phy->port;
 	if (port) {
 		if (phy->phy_type & PORT_TYPE_SAS) {
 			port->wide_port_phymap &= ~(1U << i);
@@ -2497,7 +2743,7 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
 {
 	struct mvs_phy *phy = &mvi->phy[i];
 	struct pci_dev *pdev = mvi->pdev;
-	u32 tmp, j;
+	u32 tmp;
 	u64 tmp64;
 
 	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
@@ -2524,46 +2770,20 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
 		sas_phy->linkrate =
 			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
 				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
-
-		/* Updated attached_sas_addr */
-		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
-		phy->att_dev_sas_addr =
-			(u64) mvs_read_port_cfg_data(mvi, i) << 32;
-
-		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
-		phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			"phy[%d] Get Attached Address 0x%llX ,"
-			" SAS Address 0x%llX\n",
-			i, phy->att_dev_sas_addr, phy->dev_sas_addr);
-		dev_printk(KERN_DEBUG, &pdev->dev,
-			"Rate = %x , type = %d\n",
-			sas_phy->linkrate, phy->phy_type);
-
-#if 1
-		/*
-		 * If the device is capable of supporting a wide port
-		 * on its phys, it may configure the phys as a wide port.
-		 */
-		if (phy->phy_type & PORT_TYPE_SAS)
-			for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
-				if ((mvi->phy[j].phy_attached) &&
-					(mvi->phy[j].phy_type & PORT_TYPE_SAS))
-					if (phy->att_dev_sas_addr ==
-					mvi->phy[j].att_dev_sas_addr - 1) {
-						phy->att_dev_sas_addr =
-						mvi->phy[j].att_dev_sas_addr;
-						break;
-					}
-			}
-
-#endif
-
-		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
-		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
+		phy->minimum_linkrate =
+			(phy->phy_status &
+				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+		phy->maximum_linkrate =
+			(phy->phy_status &
+				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
 
 		if (phy->phy_type & PORT_TYPE_SAS) {
+			/* Updated attached_sas_addr */
+			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+			phy->att_dev_sas_addr =
+				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
+			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
 			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
 			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
 			phy->identify.device_type =
@@ -2582,6 +2802,7 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
 	} else if (phy->phy_type & PORT_TYPE_SATA) {
 		phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
 		if (mvs_is_sig_fis_received(phy->irq_status)) {
+			phy->att_dev_sas_addr = i;	/* temp */
 			if (phy_st & PHY_OOB_DTCTD)
 				sas_phy->oob_mode = SATA_OOB_MODE;
 			phy->frame_rcvd_size =
@@ -2591,20 +2812,34 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
 		} else {
 			dev_printk(KERN_DEBUG, &pdev->dev,
 				"No sig fis\n");
+			phy->phy_type &= ~(PORT_TYPE_SATA);
+			goto out_done;
 		}
 	}
+	tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
+	memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
+
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"phy[%d] Get Attached Address 0x%llX ,"
+		" SAS Address 0x%llX\n",
+		i, phy->att_dev_sas_addr, phy->dev_sas_addr);
+	dev_printk(KERN_DEBUG, &pdev->dev,
+		"Rate = %x , type = %d\n",
+		sas_phy->linkrate, phy->phy_type);
+
 	/* workaround for HW phy decoding error on 1.5g disk drive */
 	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
 	tmp = mvs_read_port_vsr_data(mvi, i);
 	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
 		PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
 		SAS_LINK_RATE_1_5_GBPS)
-		tmp &= ~PHY_MODE6_DTL_SPEED;
+		tmp &= ~PHY_MODE6_LATECLK;
 	else
-		tmp |= PHY_MODE6_DTL_SPEED;
+		tmp |= PHY_MODE6_LATECLK;
 	mvs_write_port_vsr_data(mvi, i, tmp);
 
 	}
+out_done:
 	if (get_st)
 		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
 }
@@ -2629,6 +2864,11 @@ static void mvs_port_formed(struct asd_sas_phy *sas_phy)
 	spin_unlock_irqrestore(&mvi->lock, flags);
 }
 
+static int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+	return TMF_RESP_FUNC_FAILED;
+}
+
 static int __devinit mvs_hw_init(struct mvs_info *mvi)
 {
 	void __iomem *regs = mvi->regs;
@@ -2790,13 +3030,12 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi)
 	/* enable CMD/CMPL_Q/RESP mode */
 	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
 
-	/* re-enable interrupts globally */
-	mvs_hba_interrupt_enable(mvi);
-
 	/* enable completion queue interrupt */
-	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
 	mw32(INT_MASK, tmp);
 
+	/* Enable SRS interrupt */
+	mw32(INT_MASK_SRS, 0xFF);
 	return 0;
 }
 
@@ -2870,6 +3109,8 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
 
 	mvs_print_info(mvi);
 
+	mvs_hba_interrupt_enable(mvi);
+
 	scsi_scan_host(mvi->shost);
 
 	return 0;
@@ -2915,12 +3156,22 @@ static struct sas_domain_function_template mvs_transport_ops = {
 	.lldd_execute_task	= mvs_task_exec,
 	.lldd_control_phy	= mvs_phy_control,
 	.lldd_abort_task	= mvs_task_abort,
-	.lldd_port_formed	= mvs_port_formed
+	.lldd_port_formed	= mvs_port_formed,
+	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
 };
 
 static struct pci_device_id __devinitdata mvs_pci_table[] = {
 	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
 	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+	{
+		.vendor		= PCI_VENDOR_ID_MARVELL,
+		.device		= 0x6440,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= 0x6480,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= chip_6480,
+	},
 	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
 	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
 
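[Editor's note: the new table entry exists so that 0x6440 boards whose subsystem device ID is 0x6480 bind as chip_6480, and it must appear before the generic PCI_VDEVICE(MARVELL, 0x6440) line because the PCI core takes the first matching entry in table order. A toy model of that first-match rule, with the field set reduced for illustration:

	#include <stdio.h>

	struct id { unsigned device, subdevice, data; };
	#define ANY_ID 0xffffffffu

	static const struct id *first_match(const struct id *tbl, unsigned n,
					    unsigned dev, unsigned subdev)
	{
		for (unsigned i = 0; i < n; i++)
			if (tbl[i].device == dev &&
			    (tbl[i].subdevice == ANY_ID ||
			     tbl[i].subdevice == subdev))
				return &tbl[i];	/* first hit wins */
		return 0;
	}

	int main(void)
	{
		const struct id tbl[] = {
			{ 0x6440, 0x6480, 6480 },	/* specific entry first */
			{ 0x6440, ANY_ID, 6440 },	/* generic entry after */
		};
		/* prints 6480: the subsystem-specific entry shadows the generic one */
		printf("%u\n", first_match(tbl, 2, 0x6440, 0x6480)->data);
		return 0;
	}

Were the order reversed, the generic entry would match every 0x6440 board first and the 6480 binding would never trigger.]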