author	Ke Wei <kewei@marvell.com>	2008-03-27 02:53:47 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2008-03-28 13:27:54 -0400
commit	ee1f1c2ef95258351e1ecb89a2dbd2763cb3a6ed (patch)
tree	8184deaa6482fecadda7b0c3df82bb550182444c /drivers/scsi
parent	0eb9ddd82a5cb08f3622345e723d236eefa0039f (diff)
[SCSI] mvsas: a tag handler implementation
Add a new tag handler for allocating slot numbers. While a slot number is in use its bit stays set, so a new task cannot claim a slot that is already taken. Also plumb in the phy link rates.

Signed-off-by: Ke Wei <kewei@marvell.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
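The tag handler tracks slot usage in a bitmap: allocating a tag finds and sets the first clear bit, freeing a tag clears that bit, so a busy slot can never be handed out twice. A rough user-space sketch of the same scheme follows (illustrative only; NUM_SLOTS, tag_alloc() and tag_free() are stand-ins for MVS_SLOTS and the driver's helpers, which use the kernel's find_first_zero_bit()/set_bit()/clear_bit()):

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 64                          /* stand-in for MVS_SLOTS */

static uint64_t tag_bitmap;                   /* bit n set => slot n is busy */

/* Find the first clear bit, mark it busy, and return it via *tag_out.
 * Returns 0 on success, -1 when every slot is in use (queue full). */
static int tag_alloc(uint32_t *tag_out)
{
	uint32_t tag;

	for (tag = 0; tag < NUM_SLOTS; tag++) {
		if (!(tag_bitmap & (1ULL << tag))) {
			tag_bitmap |= 1ULL << tag;    /* like set_bit() */
			*tag_out = tag;
			return 0;
		}
	}
	return -1;                                    /* queue full */
}

/* Freeing a tag just clears its bit, making the slot reusable. */
static void tag_free(uint32_t tag)
{
	tag_bitmap &= ~(1ULL << tag);                 /* like clear_bit() */
}

int main(void)
{
	uint32_t a, b;

	tag_alloc(&a);
	tag_alloc(&b);          /* b != a: a's bit is still set, so a is skipped */
	printf("allocated tags %u and %u\n", a, b);
	tag_free(a);            /* slot a becomes available again */
	return 0;
}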
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/mvsas.c	130
1 file changed, 87 insertions(+), 43 deletions(-)
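One detail worth noting in the patch below: mvs_find_tag() recovers a task's tag from pointer arithmetic alone, since task->lldd_task points into the mvi->slot_info[] array and subtracting the array base yields the slot index. A small standalone sketch of that idiom (struct slot_info and slot_to_tag() here are simplified stand-ins, not the driver's types):

#include <stddef.h>
#include <stdio.h>

struct slot_info {              /* simplified stand-in for struct mvs_slot_info */
	void *task;
	int   busy;
};

#define NUM_SLOTS 64

static struct slot_info slot_info[NUM_SLOTS];

/* Given a pointer stored in a task (like task->lldd_task), recover the
 * slot index by subtracting the array base, as mvs_find_tag() does. */
static size_t slot_to_tag(const struct slot_info *slot)
{
	return (size_t)(slot - slot_info);   /* element-wise pointer difference */
}

int main(void)
{
	struct slot_info *stored = &slot_info[17];   /* what lldd_task would hold */

	printf("tag = %zu\n", slot_to_tag(stored));  /* prints 17 */
	return 0;
}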
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
index 3a447fea0b1f..9ebf56510d21 100644
--- a/drivers/scsi/mvsas.c
+++ b/drivers/scsi/mvsas.c
@@ -760,10 +760,10 @@ static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
 	printk("\n");
 }
 
+#if _MV_DUMP
 static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
 			   enum sas_protocol proto)
 {
-#if _MV_DUMP
 	u32 offset;
 	struct pci_dev *pdev = mvi->pdev;
 	struct mvs_slot_info *slot = &mvi->slot_info[tag];
@@ -774,14 +774,14 @@ static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
 		     tag);
 	mvs_hexdump(32, (u8 *) slot->response,
 		    (u32) slot->buf_dma + offset);
-#endif
 }
+#endif
 
 static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 				enum sas_protocol proto)
 {
 #if _MV_DUMP
-	u32 sz, w_ptr, r_ptr;
+	u32 sz, w_ptr;
 	u64 addr;
 	void __iomem *regs = mvi->regs;
 	struct pci_dev *pdev = mvi->pdev;
@@ -789,12 +789,10 @@ static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 
 	/*Delivery Queue */
 	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
-	w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
-	r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
+	w_ptr = slot->tx;
 	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
 	dev_printk(KERN_DEBUG, &pdev->dev,
-		"Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
-		sz, w_ptr, r_ptr);
+		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Delivery Queue Base Address=0x%llX (PA)"
 		"(tx_dma=0x%llX), Entry=%04d\n",
@@ -802,11 +800,11 @@ static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
 		(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
 	/*Command List */
-	addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
+	addr = mvi->slot_dma;
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Command List Base Address=0x%llX (PA)"
 		"(slot_dma=0x%llX), Header=%03d\n",
-		addr, mvi->slot_dma, tag);
+		addr, slot->buf_dma, tag);
 	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
 	/*mvs_cmd_hdr */
 	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
@@ -830,7 +828,7 @@ static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
 
 static void mvs_hba_cq_dump(struct mvs_info *mvi)
 {
-#if _MV_DUMP
+#if (_MV_DUMP > 2)
 	u64 addr;
 	void __iomem *regs = mvi->regs;
 	struct pci_dev *pdev = mvi->pdev;
@@ -839,8 +837,8 @@ static void mvs_hba_cq_dump(struct mvs_info *mvi)
 
 	/*Completion Queue */
 	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
-	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
-		(u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
+		mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Completion List Base Address=0x%llX (PA), "
 		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
@@ -905,34 +903,53 @@ static int pci_go_64(struct pci_dev *pdev)
 	return rc;
 }
 
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+	if (task->lldd_task) {
+		struct mvs_slot_info *slot;
+		slot = (struct mvs_slot_info *) task->lldd_task;
+		*tag = slot - mvi->slot_info;
+		return 1;
+	}
+	return 0;
+}
+
 static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
 {
-	mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
-	mvi->tags[mvi->tag_in] = tag;
+	void *bitmap = (void *) &mvi->tags;
+	clear_bit(tag, bitmap);
 }
 
 static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
 {
-	mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
+	mvs_tag_clear(mvi, tag);
+}
+
+static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+	void *bitmap = (void *) &mvi->tags;
+	set_bit(tag, bitmap);
 }
 
 static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
 {
-	if (mvi->tag_out != mvi->tag_in) {
-		*tag_out = mvi->tags[mvi->tag_out];
-		mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
-		return 0;
-	}
-	return -EBUSY;
+	unsigned int index, tag;
+	void *bitmap = (void *) &mvi->tags;
+
+	index = find_first_zero_bit(bitmap, MVS_SLOTS);
+	tag = index;
+	if (tag >= MVS_SLOTS)
+		return -SAS_QUEUE_FULL;
+	mvs_tag_set(mvi, tag);
+	*tag_out = tag;
+	return 0;
 }
 
 static void mvs_tag_init(struct mvs_info *mvi)
 {
 	int i;
 	for (i = 0; i < MVS_SLOTS; ++i)
-		mvi->tags[i] = i;
-	mvi->tag_out = 0;
-	mvi->tag_in = MVS_SLOTS - 1;
+		mvs_tag_clear(mvi, i);
 }
 
 #ifndef MVS_DISABLE_NVRAM
@@ -1064,10 +1081,21 @@ err_out:
 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
 {
 	struct mvs_phy *phy = &mvi->phy[i];
+	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
 
 	if (!phy->phy_attached)
 		return;
 
+	if (sas_phy->phy) {
+		struct sas_phy *sphy = sas_phy->phy;
+
+		sphy->negotiated_linkrate = sas_phy->linkrate;
+		sphy->minimum_linkrate = phy->minimum_linkrate;
+		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+	}
+
 	if (phy->phy_type & PORT_TYPE_SAS) {
 		struct sas_identify_frame *id;
 
@@ -1104,72 +1132,88 @@ static void mvs_scan_start(struct Scsi_Host *shost)
 	}
 }
 
-static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
+static int mvs_slave_configure(struct scsi_device *sdev)
 {
-	int rc;
+	struct domain_device *dev = sdev_to_domain_dev(sdev);
+	int ret = sas_slave_configure(sdev);
 
-	rc = sas_slave_alloc(scsi_dev);
+	if (ret)
+		return ret;
 
-	return rc;
+	if (dev_is_sata(dev)) {
+		/* struct ata_port *ap = dev->sata_dev.ap; */
+		/* struct ata_device *adev = ap->link.device; */
+
+		/* clamp at no NCQ for the time being */
+		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+	}
+	return 0;
 }
 
-static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
+static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
 {
 	struct pci_dev *pdev = mvi->pdev;
 	struct sas_ha_struct *sas_ha = &mvi->sas;
-	struct mvs_phy *phy = &mvi->phy[port_no];
+	struct mvs_phy *phy = &mvi->phy[phy_no];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 
-	phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
+	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
 	/*
 	* events is port event now ,
 	* we need check the interrupt status which belongs to per port.
 	*/
 	dev_printk(KERN_DEBUG, &pdev->dev,
 		"Port %d Event = %X\n",
-		port_no, phy->irq_status);
+		phy_no, phy->irq_status);
 
 	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
-		if (!mvs_is_phy_ready(mvi, port_no)) {
+		mvs_release_task(mvi, phy_no);
+		if (!mvs_is_phy_ready(mvi, phy_no)) {
 			sas_phy_disconnected(sas_phy);
 			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+			dev_printk(KERN_INFO, &pdev->dev,
+				"Port %d Unplug Notice\n", phy_no);
+
 		} else
 			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
 	}
 	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
 		if (phy->irq_status & PHYEV_COMWAKE) {
-			u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
-			mvs_write_port_irq_mask(mvi, port_no,
+			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
+			mvs_write_port_irq_mask(mvi, phy_no,
 						tmp | PHYEV_SIG_FIS);
 		}
 		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
-			phy->phy_status = mvs_is_phy_ready(mvi, port_no);
+			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
 			if (phy->phy_status) {
-				mvs_detect_porttype(mvi, port_no);
+				mvs_detect_porttype(mvi, phy_no);
 
 				if (phy->phy_type & PORT_TYPE_SATA) {
 					u32 tmp = mvs_read_port_irq_mask(mvi,
-								port_no);
+								phy_no);
 					tmp &= ~PHYEV_SIG_FIS;
 					mvs_write_port_irq_mask(mvi,
-								port_no, tmp);
+								phy_no, tmp);
 				}
 
-				mvs_update_phyinfo(mvi, port_no, 0);
+				mvs_update_phyinfo(mvi, phy_no, 0);
 				sas_ha->notify_phy_event(sas_phy,
 							PHYE_OOB_DONE);
-				mvs_bytes_dmaed(mvi, port_no);
+				mvs_bytes_dmaed(mvi, phy_no);
 			} else {
 				dev_printk(KERN_DEBUG, &pdev->dev,
 					"plugin interrupt but phy is gone\n");
 				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
 							NULL);
 			}
-		} else if (phy->irq_status & PHYEV_BROAD_CH)
+		} else if (phy->irq_status & PHYEV_BROAD_CH) {
+			mvs_release_task(mvi, phy_no);
 			sas_ha->notify_port_event(sas_phy,
 						PORTE_BROADCAST_RCVD);
+		}
 	}
-	mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
+	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
 }
 
 static void mvs_int_sata(struct mvs_info *mvi)