Diffstat (limited to 'drivers/scsi/mvsas/mv_sas.c')
-rw-r--r--	drivers/scsi/mvsas/mv_sas.c	422
1 file changed, 163 insertions, 259 deletions
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0ef27425c447..4958fefff365 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
38 38
39void mvs_tag_clear(struct mvs_info *mvi, u32 tag) 39void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
40{ 40{
41 void *bitmap = &mvi->tags; 41 void *bitmap = mvi->tags;
42 clear_bit(tag, bitmap); 42 clear_bit(tag, bitmap);
43} 43}
44 44
@@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
49 49
50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) 50void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
51{ 51{
52 void *bitmap = &mvi->tags; 52 void *bitmap = mvi->tags;
53 set_bit(tag, bitmap); 53 set_bit(tag, bitmap);
54} 54}
55 55
56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) 56inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
57{ 57{
58 unsigned int index, tag; 58 unsigned int index, tag;
59 void *bitmap = &mvi->tags; 59 void *bitmap = mvi->tags;
60 60
61 index = find_first_zero_bit(bitmap, mvi->tags_num); 61 index = find_first_zero_bit(bitmap, mvi->tags_num);
62 tag = index; 62 tag = index;
@@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
74 mvs_tag_clear(mvi, i); 74 mvs_tag_clear(mvi, i);
75} 75}
76 76
77void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
78{
79 u32 i;
80 u32 run;
81 u32 offset;
82
83 offset = 0;
84 while (size) {
85 printk(KERN_DEBUG"%08X : ", baseaddr + offset);
86 if (size >= 16)
87 run = 16;
88 else
89 run = size;
90 size -= run;
91 for (i = 0; i < 16; i++) {
92 if (i < run)
93 printk(KERN_DEBUG"%02X ", (u32)data[i]);
94 else
95 printk(KERN_DEBUG" ");
96 }
97 printk(KERN_DEBUG": ");
98 for (i = 0; i < run; i++)
99 printk(KERN_DEBUG"%c",
100 isalnum(data[i]) ? data[i] : '.');
101 printk(KERN_DEBUG"\n");
102 data = &data[16];
103 offset += run;
104 }
105 printk(KERN_DEBUG"\n");
106}
107
108#if (_MV_DUMP > 1)
109static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
110 enum sas_protocol proto)
111{
112 u32 offset;
113 struct mvs_slot_info *slot = &mvi->slot_info[tag];
114
115 offset = slot->cmd_size + MVS_OAF_SZ +
116 MVS_CHIP_DISP->prd_size() * slot->n_elem;
117 dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
118 tag);
119 mvs_hexdump(32, (u8 *) slot->response,
120 (u32) slot->buf_dma + offset);
121}
122#endif
123
124static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
125 enum sas_protocol proto)
126{
127#if (_MV_DUMP > 1)
128 u32 sz, w_ptr;
129 u64 addr;
130 struct mvs_slot_info *slot = &mvi->slot_info[tag];
131
132 /*Delivery Queue */
133 sz = MVS_CHIP_SLOT_SZ;
134 w_ptr = slot->tx;
135 addr = mvi->tx_dma;
136 dev_printk(KERN_DEBUG, mvi->dev,
137 "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
138 dev_printk(KERN_DEBUG, mvi->dev,
139 "Delivery Queue Base Address=0x%llX (PA)"
140 "(tx_dma=0x%llX), Entry=%04d\n",
141 addr, (unsigned long long)mvi->tx_dma, w_ptr);
142 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
143 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
144 /*Command List */
145 addr = mvi->slot_dma;
146 dev_printk(KERN_DEBUG, mvi->dev,
147 "Command List Base Address=0x%llX (PA)"
148 "(slot_dma=0x%llX), Header=%03d\n",
149 addr, (unsigned long long)slot->buf_dma, tag);
150 dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
151 /*mvs_cmd_hdr */
152 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
153 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
154 /*1.command table area */
155 dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
156 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
157 /*2.open address frame area */
158 dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
159 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
160 (u32) slot->buf_dma + slot->cmd_size);
161 /*3.status buffer */
162 mvs_hba_sb_dump(mvi, tag, proto);
163 /*4.PRD table */
164 dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
165 mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
166 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
167 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
168#endif
169}
170
171static void mvs_hba_cq_dump(struct mvs_info *mvi)
172{
173#if (_MV_DUMP > 2)
174 u64 addr;
175 void __iomem *regs = mvi->regs;
176 u32 entry = mvi->rx_cons + 1;
177 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
178
179 /*Completion Queue */
180 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
181 dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
182 mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
183 dev_printk(KERN_DEBUG, mvi->dev,
184 "Completion List Base Address=0x%llX (PA), "
185 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
186 addr, entry - 1, mvi->rx[0]);
187 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
188 mvi->rx_dma + sizeof(u32) * entry);
189#endif
190}
191
192void mvs_get_sas_addr(void *buf, u32 buflen)
193{
194 /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
195}
196
197struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) 77struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
198{ 78{
199 unsigned long i = 0, j = 0, hi = 0; 79 unsigned long i = 0, j = 0, hi = 0;
@@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
222 102
223} 103}
224 104
225/* FIXME */
226int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) 105int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
227{ 106{
228 unsigned long i = 0, j = 0, n = 0, num = 0; 107 unsigned long i = 0, j = 0, n = 0, num = 0;
@@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
253 return num; 132 return num;
254} 133}
255 134
135struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
136 u8 reg_set)
137{
138 u32 dev_no;
139 for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
140 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
141 continue;
142
143 if (mvi->devices[dev_no].taskfileset == reg_set)
144 return &mvi->devices[dev_no];
145 }
146 return NULL;
147}
148
256static inline void mvs_free_reg_set(struct mvs_info *mvi, 149static inline void mvs_free_reg_set(struct mvs_info *mvi,
257 struct mvs_device *dev) 150 struct mvs_device *dev)
258{ 151{
@@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
283 } 176 }
284} 177}
285 178
286/* FIXME: locking? */
287int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 179int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
288 void *funcdata) 180 void *funcdata)
289{ 181{
@@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
309 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); 201 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
310 if (tmp & PHY_RST_HARD) 202 if (tmp & PHY_RST_HARD)
311 break; 203 break;
312 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); 204 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
313 break; 205 break;
314 206
315 case PHY_FUNC_LINK_RESET: 207 case PHY_FUNC_LINK_RESET:
316 MVS_CHIP_DISP->phy_enable(mvi, phy_id); 208 MVS_CHIP_DISP->phy_enable(mvi, phy_id);
317 MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); 209 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
318 break; 210 break;
319 211
320 case PHY_FUNC_DISABLE: 212 case PHY_FUNC_DISABLE:
@@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
406 298
407 if (ret) 299 if (ret)
408 return ret; 300 return ret;
409 if (dev_is_sata(dev)) { 301 if (!dev_is_sata(dev)) {
410 /* may set PIO mode */ 302 sas_change_queue_depth(sdev,
411 #if MV_DISABLE_NCQ 303 MVS_QUEUE_SIZE,
412 struct ata_port *ap = dev->sata_dev.ap; 304 SCSI_QDEPTH_DEFAULT);
413 struct ata_device *adev = ap->link.device;
414 adev->flags |= ATA_DFLAG_NCQ_OFF;
415 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
416 #endif
417 } 305 }
418 return 0; 306 return 0;
419} 307}
@@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
424 unsigned short core_nr; 312 unsigned short core_nr;
425 struct mvs_info *mvi; 313 struct mvs_info *mvi;
426 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 314 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
315 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
427 316
428 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; 317 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
429 318
@@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
432 for (i = 0; i < mvi->chip->n_phy; ++i) 321 for (i = 0; i < mvi->chip->n_phy; ++i)
433 mvs_bytes_dmaed(mvi, i); 322 mvs_bytes_dmaed(mvi, i);
434 } 323 }
324 mvs_prv->scan_finished = 1;
435} 325}
436 326
437int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) 327int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
438{ 328{
439 /* give the phy enabling interrupt event time to come in (1s 329 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
440 * is empirically about all it takes) */ 330 struct mvs_prv_info *mvs_prv = sha->lldd_ha;
441 if (time < HZ) 331
332 if (mvs_prv->scan_finished == 0)
442 return 0; 333 return 0;
443 /* Wait for discovery to finish */ 334
444 scsi_flush_work(shost); 335 scsi_flush_work(shost);
445 return 1; 336 return 1;
446} 337}
@@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
461 void *buf_prd; 352 void *buf_prd;
462 struct mvs_slot_info *slot = &mvi->slot_info[tag]; 353 struct mvs_slot_info *slot = &mvi->slot_info[tag];
463 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 354 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
464#if _MV_DUMP 355
465 u8 *buf_cmd;
466 void *from;
467#endif
468 /* 356 /*
469 * DMA-map SMP request, response buffers 357 * DMA-map SMP request, response buffers
470 */ 358 */
@@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
496 buf_tmp = slot->buf; 384 buf_tmp = slot->buf;
497 buf_tmp_dma = slot->buf_dma; 385 buf_tmp_dma = slot->buf_dma;
498 386
499#if _MV_DUMP
500 buf_cmd = buf_tmp;
501 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
502 buf_tmp += req_len;
503 buf_tmp_dma += req_len;
504 slot->cmd_size = req_len;
505#else
506 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); 387 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
507#endif
508 388
509 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 389 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
510 buf_oaf = buf_tmp; 390 buf_oaf = buf_tmp;
@@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
553 /* fill in PRD (scatter/gather) table, if any */ 433 /* fill in PRD (scatter/gather) table, if any */
554 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 434 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
555 435
556#if _MV_DUMP
557 /* copy cmd table */
558 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
559 memcpy(buf_cmd, from + sg_req->offset, req_len);
560 kunmap_atomic(from, KM_IRQ0);
561#endif
562 return 0; 436 return 0;
563 437
564err_out_2: 438err_out_2:
@@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
616 (mvi_dev->taskfileset << TXQ_SRS_SHIFT); 490 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
617 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); 491 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
618 492
619#ifndef DISABLE_HOTPLUG_DMA_FIX
620 if (task->data_dir == DMA_FROM_DEVICE) 493 if (task->data_dir == DMA_FROM_DEVICE)
621 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); 494 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
622 else 495 else
623 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); 496 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
624#else 497
625 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
626#endif
627 if (task->ata_task.use_ncq) 498 if (task->ata_task.use_ncq)
628 flags |= MCH_FPDMA; 499 flags |= MCH_FPDMA;
629 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { 500 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
631 flags |= MCH_ATAPI; 502 flags |= MCH_ATAPI;
632 } 503 }
633 504
634 /* FIXME: fill in port multiplier number */
635
636 hdr->flags = cpu_to_le32(flags); 505 hdr->flags = cpu_to_le32(flags);
637 506
638 /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
639 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) 507 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
640 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); 508 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
641 else 509 else
@@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
657 525
658 buf_tmp += MVS_ATA_CMD_SZ; 526 buf_tmp += MVS_ATA_CMD_SZ;
659 buf_tmp_dma += MVS_ATA_CMD_SZ; 527 buf_tmp_dma += MVS_ATA_CMD_SZ;
660#if _MV_DUMP
661 slot->cmd_size = MVS_ATA_CMD_SZ;
662#endif
663 528
664 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 529 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
665 /* used for STP. unused for SATA? */ 530 /* used for STP. unused for SATA? */
@@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
682 buf_tmp_dma += i; 547 buf_tmp_dma += i;
683 548
684 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ 549 /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
685 /* FIXME: probably unused, for SATA. kept here just in case
686 * we get a STP/SATA error information record
687 */
688 slot->response = buf_tmp; 550 slot->response = buf_tmp;
689 hdr->status_buf = cpu_to_le64(buf_tmp_dma); 551 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
690 if (mvi->flags & MVF_FLAG_SOC) 552 if (mvi->flags & MVF_FLAG_SOC)
@@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
715 577
716 /* fill in PRD (scatter/gather) table, if any */ 578 /* fill in PRD (scatter/gather) table, if any */
717 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); 579 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
718#ifndef DISABLE_HOTPLUG_DMA_FIX 580
719 if (task->data_dir == DMA_FROM_DEVICE) 581 if (task->data_dir == DMA_FROM_DEVICE)
720 MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, 582 MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
721 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); 583 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
722#endif 584
723 return 0; 585 return 0;
724} 586}
725 587
@@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
761 } 623 }
762 if (is_tmf) 624 if (is_tmf)
763 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); 625 flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
626 else
627 flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
628
764 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); 629 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
765 hdr->tags = cpu_to_le32(tag); 630 hdr->tags = cpu_to_le32(tag);
766 hdr->data_len = cpu_to_le32(task->total_xfer_len); 631 hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
777 642
778 buf_tmp += MVS_SSP_CMD_SZ; 643 buf_tmp += MVS_SSP_CMD_SZ;
779 buf_tmp_dma += MVS_SSP_CMD_SZ; 644 buf_tmp_dma += MVS_SSP_CMD_SZ;
780#if _MV_DUMP
781 slot->cmd_size = MVS_SSP_CMD_SZ;
782#endif
783 645
784 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ 646 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
785 buf_oaf = buf_tmp; 647 buf_oaf = buf_tmp;
@@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
986 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 848 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
987 spin_unlock(&task->task_state_lock); 849 spin_unlock(&task->task_state_lock);
988 850
989 mvs_hba_memory_dump(mvi, tag, task->task_proto);
990 mvi_dev->running_req++; 851 mvi_dev->running_req++;
991 ++(*pass); 852 ++(*pass);
992 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); 853 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
@@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
1189 mvs_slot_free(mvi, slot_idx); 1050 mvs_slot_free(mvi, slot_idx);
1190} 1051}
1191 1052
1192static void mvs_update_wideport(struct mvs_info *mvi, int i) 1053static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
1193{ 1054{
1194 struct mvs_phy *phy = &mvi->phy[i]; 1055 struct mvs_phy *phy = &mvi->phy[phy_no];
1195 struct mvs_port *port = phy->port; 1056 struct mvs_port *port = phy->port;
1196 int j, no; 1057 int j, no;
1197 1058
@@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1246 return NULL; 1107 return NULL;
1247 1108
1248 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); 1109 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
1249 s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1110 s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1250 1111
1251 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); 1112 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
1252 s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1113 s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1253 1114
1254 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); 1115 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
1255 s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1116 s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1256 1117
1257 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); 1118 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
1258 s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); 1119 s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
1259 1120
1260 /* Workaround: take some ATAPI devices for ATA */
1261 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) 1121 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1262 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); 1122 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1263 1123
@@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
1269 return irq_status & PHYEV_SIG_FIS; 1129 return irq_status & PHYEV_SIG_FIS;
1270} 1130}
1271 1131
1132static void mvs_sig_remove_timer(struct mvs_phy *phy)
1133{
1134 if (phy->timer.function)
1135 del_timer(&phy->timer);
1136 phy->timer.function = NULL;
1137}
1138
1272void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) 1139void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1273{ 1140{
1274 struct mvs_phy *phy = &mvi->phy[i]; 1141 struct mvs_phy *phy = &mvi->phy[i];
@@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1291 if (phy->phy_type & PORT_TYPE_SATA) { 1158 if (phy->phy_type & PORT_TYPE_SATA) {
1292 phy->identify.target_port_protocols = SAS_PROTOCOL_STP; 1159 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1293 if (mvs_is_sig_fis_received(phy->irq_status)) { 1160 if (mvs_is_sig_fis_received(phy->irq_status)) {
1161 mvs_sig_remove_timer(phy);
1294 phy->phy_attached = 1; 1162 phy->phy_attached = 1;
1295 phy->att_dev_sas_addr = 1163 phy->att_dev_sas_addr =
1296 i + mvi->id * mvi->chip->n_phy; 1164 i + mvi->id * mvi->chip->n_phy;
@@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1308 tmp | PHYEV_SIG_FIS); 1176 tmp | PHYEV_SIG_FIS);
1309 phy->phy_attached = 0; 1177 phy->phy_attached = 0;
1310 phy->phy_type &= ~PORT_TYPE_SATA; 1178 phy->phy_type &= ~PORT_TYPE_SATA;
1311 MVS_CHIP_DISP->phy_reset(mvi, i, 0);
1312 goto out_done; 1179 goto out_done;
1313 } 1180 }
1314 } else if (phy->phy_type & PORT_TYPE_SAS 1181 } else if (phy->phy_type & PORT_TYPE_SAS
@@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1334 if (MVS_CHIP_DISP->phy_work_around) 1201 if (MVS_CHIP_DISP->phy_work_around)
1335 MVS_CHIP_DISP->phy_work_around(mvi, i); 1202 MVS_CHIP_DISP->phy_work_around(mvi, i);
1336 } 1203 }
1337 mv_dprintk("port %d attach dev info is %x\n", 1204 mv_dprintk("phy %d attach dev info is %x\n",
1338 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); 1205 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1339 mv_dprintk("port %d attach sas addr is %llx\n", 1206 mv_dprintk("phy %d attach sas addr is %llx\n",
1340 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); 1207 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1341out_done: 1208out_done:
1342 if (get_st) 1209 if (get_st)
@@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1361 } 1228 }
1362 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; 1229 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1363 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; 1230 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1364 if (sas_port->id >= mvi->chip->n_phy) 1231 if (i >= mvi->chip->n_phy)
1365 port = &mvi->port[sas_port->id - mvi->chip->n_phy]; 1232 port = &mvi->port[i - mvi->chip->n_phy];
1366 else 1233 else
1367 port = &mvi->port[sas_port->id]; 1234 port = &mvi->port[i];
1368 if (lock) 1235 if (lock)
1369 spin_lock_irqsave(&mvi->lock, flags); 1236 spin_lock_irqsave(&mvi->lock, flags);
1370 port->port_attached = 1; 1237 port->port_attached = 1;
@@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1393 return; 1260 return;
1394 } 1261 }
1395 list_for_each_entry(dev, &port->dev_list, dev_list_node) 1262 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1396 mvs_do_release_task(phy->mvi, phy_no, NULL); 1263 mvs_do_release_task(phy->mvi, phy_no, dev);
1397 1264
1398} 1265}
1399 1266
@@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
1457 mvi_device->dev_status = MVS_DEV_NORMAL; 1324 mvi_device->dev_status = MVS_DEV_NORMAL;
1458 mvi_device->dev_type = dev->dev_type; 1325 mvi_device->dev_type = dev->dev_type;
1459 mvi_device->mvi_info = mvi; 1326 mvi_device->mvi_info = mvi;
1327 mvi_device->sas_device = dev;
1460 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 1328 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1461 int phy_id; 1329 int phy_id;
1462 u8 phy_num = parent_dev->ex_dev.num_phys; 1330 u8 phy_num = parent_dev->ex_dev.num_phys;
@@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
1508 mv_dprintk("found dev has gone.\n"); 1376 mv_dprintk("found dev has gone.\n");
1509 } 1377 }
1510 dev->lldd_dev = NULL; 1378 dev->lldd_dev = NULL;
1379 mvi_dev->sas_device = NULL;
1511 1380
1512 spin_unlock_irqrestore(&mvi->lock, flags); 1381 spin_unlock_irqrestore(&mvi->lock, flags);
1513} 1382}
@@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
1555 complete(&task->completion); 1424 complete(&task->completion);
1556} 1425}
1557 1426
1558/* XXX */
1559#define MVS_TASK_TIMEOUT 20 1427#define MVS_TASK_TIMEOUT 20
1560static int mvs_exec_internal_tmf_task(struct domain_device *dev, 1428static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1561 void *parameter, u32 para_len, struct mvs_tmf_task *tmf) 1429 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
@@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1588 } 1456 }
1589 1457
1590 wait_for_completion(&task->completion); 1458 wait_for_completion(&task->completion);
1591 res = -TMF_RESP_FUNC_FAILED; 1459 res = TMF_RESP_FUNC_FAILED;
1592 /* Even TMF timed out, return direct. */ 1460 /* Even TMF timed out, return direct. */
1593 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1461 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1594 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1462 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1638 u8 *lun, struct mvs_tmf_task *tmf) 1506 u8 *lun, struct mvs_tmf_task *tmf)
1639{ 1507{
1640 struct sas_ssp_task ssp_task; 1508 struct sas_ssp_task ssp_task;
1641 DECLARE_COMPLETION_ONSTACK(completion);
1642 if (!(dev->tproto & SAS_PROTOCOL_SSP)) 1509 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1643 return TMF_RESP_FUNC_ESUPP; 1510 return TMF_RESP_FUNC_ESUPP;
1644 1511
1645 strncpy((u8 *)&ssp_task.LUN, lun, 8); 1512 memcpy(ssp_task.LUN, lun, 8);
1646 1513
1647 return mvs_exec_internal_tmf_task(dev, &ssp_task, 1514 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1648 sizeof(ssp_task), tmf); 1515 sizeof(ssp_task), tmf);
@@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1666int mvs_lu_reset(struct domain_device *dev, u8 *lun) 1533int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1667{ 1534{
1668 unsigned long flags; 1535 unsigned long flags;
1669 int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; 1536 int rc = TMF_RESP_FUNC_FAILED;
1670 struct mvs_tmf_task tmf_task; 1537 struct mvs_tmf_task tmf_task;
1671 struct mvs_device * mvi_dev = dev->lldd_dev; 1538 struct mvs_device * mvi_dev = dev->lldd_dev;
1672 struct mvs_info *mvi = mvi_dev->mvi_info; 1539 struct mvs_info *mvi = mvi_dev->mvi_info;
@@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1675 mvi_dev->dev_status = MVS_DEV_EH; 1542 mvi_dev->dev_status = MVS_DEV_EH;
1676 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); 1543 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1677 if (rc == TMF_RESP_FUNC_COMPLETE) { 1544 if (rc == TMF_RESP_FUNC_COMPLETE) {
1678 num = mvs_find_dev_phyno(dev, phyno);
1679 spin_lock_irqsave(&mvi->lock, flags); 1545 spin_lock_irqsave(&mvi->lock, flags);
1680 for (i = 0; i < num; i++) 1546 mvs_release_task(mvi, dev);
1681 mvs_release_task(mvi, dev);
1682 spin_unlock_irqrestore(&mvi->lock, flags); 1547 spin_unlock_irqrestore(&mvi->lock, flags);
1683 } 1548 }
1684 /* If failed, fall-through I_T_Nexus reset */ 1549 /* If failed, fall-through I_T_Nexus reset */
@@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
1696 1561
1697 if (mvi_dev->dev_status != MVS_DEV_EH) 1562 if (mvi_dev->dev_status != MVS_DEV_EH)
1698 return TMF_RESP_FUNC_COMPLETE; 1563 return TMF_RESP_FUNC_COMPLETE;
1564 else
1565 mvi_dev->dev_status = MVS_DEV_NORMAL;
1699 rc = mvs_debug_I_T_nexus_reset(dev); 1566 rc = mvs_debug_I_T_nexus_reset(dev);
1700 mv_printk("%s for device[%x]:rc= %d\n", 1567 mv_printk("%s for device[%x]:rc= %d\n",
1701 __func__, mvi_dev->device_id, rc); 1568 __func__, mvi_dev->device_id, rc);
1702 1569
1703 /* housekeeper */
1704 spin_lock_irqsave(&mvi->lock, flags); 1570 spin_lock_irqsave(&mvi->lock, flags);
1705 mvs_release_task(mvi, dev); 1571 mvs_release_task(mvi, dev);
1706 spin_unlock_irqrestore(&mvi->lock, flags); 1572 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
1739 case TMF_RESP_FUNC_FAILED: 1605 case TMF_RESP_FUNC_FAILED:
1740 case TMF_RESP_FUNC_COMPLETE: 1606 case TMF_RESP_FUNC_COMPLETE:
1741 break; 1607 break;
1742 default:
1743 rc = TMF_RESP_FUNC_COMPLETE;
1744 break;
1745 } 1608 }
1746 } 1609 }
1747 mv_printk("%s:rc= %d\n", __func__, rc); 1610 mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
1761 u32 tag; 1624 u32 tag;
1762 1625
1763 if (!mvi_dev) { 1626 if (!mvi_dev) {
1764 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__); 1627 mv_printk("Device has removed\n");
1765 rc = TMF_RESP_FUNC_FAILED; 1628 return TMF_RESP_FUNC_FAILED;
1766 } 1629 }
1767 1630
1768 mvi = mvi_dev->mvi_info; 1631 mvi = mvi_dev->mvi_info;
@@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
1807 1670
1808 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1671 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1809 task->task_proto & SAS_PROTOCOL_STP) { 1672 task->task_proto & SAS_PROTOCOL_STP) {
1810 /* to do free register_set */
1811 if (SATA_DEV == dev->dev_type) { 1673 if (SATA_DEV == dev->dev_type) {
1812 struct mvs_slot_info *slot = task->lldd_task; 1674 struct mvs_slot_info *slot = task->lldd_task;
1813 struct task_status_struct *tstat;
1814 u32 slot_idx = (u32)(slot - mvi->slot_info); 1675 u32 slot_idx = (u32)(slot - mvi->slot_info);
1815 tstat = &task->task_status; 1676 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1816 mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
1817 "slot=%p slot_idx=x%x\n", 1677 "slot=%p slot_idx=x%x\n",
1818 mvi, task, slot, slot_idx); 1678 mvi, task, slot, slot_idx);
1819 tstat->stat = SAS_ABORTED_TASK; 1679 mvs_tmf_timedout((unsigned long)task);
1820 if (mvi_dev && mvi_dev->running_req)
1821 mvi_dev->running_req--;
1822 if (sas_protocol_ata(task->task_proto))
1823 mvs_free_reg_set(mvi, mvi_dev);
1824 mvs_slot_task_free(mvi, task, slot, slot_idx); 1680 mvs_slot_task_free(mvi, task, slot, slot_idx);
1825 return -1; 1681 rc = TMF_RESP_FUNC_COMPLETE;
1682 goto out;
1826 } 1683 }
1827 } else {
1828 /* SMP */
1829 1684
1830 } 1685 }
1831out: 1686out:
@@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1891 return stat; 1746 return stat;
1892} 1747}
1893 1748
1749void mvs_set_sense(u8 *buffer, int len, int d_sense,
1750 int key, int asc, int ascq)
1751{
1752 memset(buffer, 0, len);
1753
1754 if (d_sense) {
1755 /* Descriptor format */
1756 if (len < 4) {
1757 mv_printk("Length %d of sense buffer too small to "
1758 "fit sense %x:%x:%x", len, key, asc, ascq);
1759 }
1760
1761 buffer[0] = 0x72; /* Response Code */
1762 if (len > 1)
1763 buffer[1] = key; /* Sense Key */
1764 if (len > 2)
1765 buffer[2] = asc; /* ASC */
1766 if (len > 3)
1767 buffer[3] = ascq; /* ASCQ */
1768 } else {
1769 if (len < 14) {
1770 mv_printk("Length %d of sense buffer too small to "
1771 "fit sense %x:%x:%x", len, key, asc, ascq);
1772 }
1773
1774 buffer[0] = 0x70; /* Response Code */
1775 if (len > 2)
1776 buffer[2] = key; /* Sense Key */
1777 if (len > 7)
1778 buffer[7] = 0x0a; /* Additional Sense Length */
1779 if (len > 12)
1780 buffer[12] = asc; /* ASC */
1781 if (len > 13)
1782 buffer[13] = ascq; /* ASCQ */
1783 }
1784
1785 return;
1786}
1787
1788void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1789 u8 key, u8 asc, u8 asc_q)
1790{
1791 iu->datapres = 2;
1792 iu->response_data_len = 0;
1793 iu->sense_data_len = 17;
1794 iu->status = 02;
1795 mvs_set_sense(iu->sense_data, 17, 0,
1796 key, asc, asc_q);
1797}
1798
1894static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, 1799static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1895 u32 slot_idx) 1800 u32 slot_idx)
1896{ 1801{
1897 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; 1802 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1898 int stat; 1803 int stat;
1899 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); 1804 u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
1805 u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
1900 u32 tfs = 0; 1806 u32 tfs = 0;
1901 enum mvs_port_type type = PORT_TYPE_SAS; 1807 enum mvs_port_type type = PORT_TYPE_SAS;
1902 1808
@@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1908 stat = SAM_STAT_CHECK_CONDITION; 1814 stat = SAM_STAT_CHECK_CONDITION;
1909 switch (task->task_proto) { 1815 switch (task->task_proto) {
1910 case SAS_PROTOCOL_SSP: 1816 case SAS_PROTOCOL_SSP:
1817 {
1911 stat = SAS_ABORTED_TASK; 1818 stat = SAS_ABORTED_TASK;
1819 if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1820 struct ssp_response_iu *iu = slot->response +
1821 sizeof(struct mvs_err_info);
1822 mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
1823 sas_ssp_task_response(mvi->dev, task, iu);
1824 stat = SAM_STAT_CHECK_CONDITION;
1825 }
1826 if (err_dw1 & bit(31))
1827 mv_printk("reuse same slot, retry command.\n");
1912 break; 1828 break;
1829 }
1913 case SAS_PROTOCOL_SMP: 1830 case SAS_PROTOCOL_SMP:
1914 stat = SAM_STAT_CHECK_CONDITION; 1831 stat = SAM_STAT_CHECK_CONDITION;
1915 break; 1832 break;
@@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1918 case SAS_PROTOCOL_STP: 1835 case SAS_PROTOCOL_STP:
1919 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 1836 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1920 { 1837 {
1921 if (err_dw0 == 0x80400002)
1922 mv_printk("find reserved error, why?\n");
1923
1924 task->ata_task.use_ncq = 0; 1838 task->ata_task.use_ncq = 0;
1839 stat = SAS_PROTO_RESPONSE;
1925 mvs_sata_done(mvi, task, slot_idx, err_dw0); 1840 mvs_sata_done(mvi, task, slot_idx, err_dw0);
1926 } 1841 }
1927 break; 1842 break;
@@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1945 void *to; 1860 void *to;
1946 enum exec_status sts; 1861 enum exec_status sts;
1947 1862
1948 if (mvi->exp_req)
1949 mvi->exp_req--;
1950 if (unlikely(!task || !task->lldd_task || !task->dev)) 1863 if (unlikely(!task || !task->lldd_task || !task->dev))
1951 return -1; 1864 return -1;
1952 1865
@@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1954 dev = task->dev; 1867 dev = task->dev;
1955 mvi_dev = dev->lldd_dev; 1868 mvi_dev = dev->lldd_dev;
1956 1869
1957 mvs_hba_cq_dump(mvi);
1958
1959 spin_lock(&task->task_state_lock); 1870 spin_lock(&task->task_state_lock);
1960 task->task_state_flags &= 1871 task->task_state_flags &=
1961 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1872 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
@@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1978 return -1; 1889 return -1;
1979 } 1890 }
1980 1891
1892 /* when no device attaching, go ahead and complete by error handling*/
1981 if (unlikely(!mvi_dev || flags)) { 1893 if (unlikely(!mvi_dev || flags)) {
1982 if (!mvi_dev) 1894 if (!mvi_dev)
1983 mv_dprintk("port has not device.\n"); 1895 mv_dprintk("port has not device.\n");
@@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1987 1899
1988 /* error info record present */ 1900 /* error info record present */
1989 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { 1901 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
1902 mv_dprintk("port %d slot %d rx_desc %X has error info"
1903 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1904 rx_desc, (u64)(*(u64 *)slot->response));
1990 tstat->stat = mvs_slot_err(mvi, task, slot_idx); 1905 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1991 tstat->resp = SAS_TASK_COMPLETE; 1906 tstat->resp = SAS_TASK_COMPLETE;
1992 goto out; 1907 goto out;
@@ -2048,8 +1963,7 @@ out:
2048 spin_unlock(&mvi->lock); 1963 spin_unlock(&mvi->lock);
2049 if (task->task_done) 1964 if (task->task_done)
2050 task->task_done(task); 1965 task->task_done(task);
2051 else 1966
2052 mv_dprintk("why has not task_done.\n");
2053 spin_lock(&mvi->lock); 1967 spin_lock(&mvi->lock);
2054 1968
2055 return sts; 1969 return sts;
@@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
2092 struct domain_device *dev) 2006 struct domain_device *dev)
2093{ 2007{
2094 int i, phyno[WIDE_PORT_MAX_PHY], num; 2008 int i, phyno[WIDE_PORT_MAX_PHY], num;
2095 /* housekeeper */
2096 num = mvs_find_dev_phyno(dev, phyno); 2009 num = mvs_find_dev_phyno(dev, phyno);
2097 for (i = 0; i < num; i++) 2010 for (i = 0; i < num; i++)
2098 mvs_do_release_task(mvi, phyno[i], dev); 2011 mvs_do_release_task(mvi, phyno[i], dev);
@@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
2111 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q); 2024 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2112 struct mvs_info *mvi = mwq->mvi; 2025 struct mvs_info *mvi = mwq->mvi;
2113 unsigned long flags; 2026 unsigned long flags;
2027 u32 phy_no = (unsigned long) mwq->data;
2028 struct sas_ha_struct *sas_ha = mvi->sas;
2029 struct mvs_phy *phy = &mvi->phy[phy_no];
2030 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2114 2031
2115 spin_lock_irqsave(&mvi->lock, flags); 2032 spin_lock_irqsave(&mvi->lock, flags);
2116 if (mwq->handler & PHY_PLUG_EVENT) { 2033 if (mwq->handler & PHY_PLUG_EVENT) {
2117 u32 phy_no = (unsigned long) mwq->data;
2118 struct sas_ha_struct *sas_ha = mvi->sas;
2119 struct mvs_phy *phy = &mvi->phy[phy_no];
2120 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2121 2034
2122 if (phy->phy_event & PHY_PLUG_OUT) { 2035 if (phy->phy_event & PHY_PLUG_OUT) {
2123 u32 tmp; 2036 u32 tmp;
@@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
2139 mv_dprintk("phy%d Attached Device\n", phy_no); 2052 mv_dprintk("phy%d Attached Device\n", phy_no);
2140 } 2053 }
2141 } 2054 }
2055 } else if (mwq->handler & EXP_BRCT_CHG) {
2056 phy->phy_event &= ~EXP_BRCT_CHG;
2057 sas_ha->notify_port_event(sas_phy,
2058 PORTE_BROADCAST_RCVD);
2059 mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
2142 } 2060 }
2143 list_del(&mwq->entry); 2061 list_del(&mwq->entry);
2144 spin_unlock_irqrestore(&mvi->lock, flags); 2062 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
2174 if (&mvi->phy[phy_no] == phy) { 2092 if (&mvi->phy[phy_no] == phy) {
2175 mv_dprintk("Get signature time out, reset phy %d\n", 2093 mv_dprintk("Get signature time out, reset phy %d\n",
2176 phy_no+mvi->id*mvi->chip->n_phy); 2094 phy_no+mvi->id*mvi->chip->n_phy);
2177 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); 2095 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
2178 } 2096 }
2179 } 2097 }
2180} 2098}
2181 2099
2182static void mvs_sig_remove_timer(struct mvs_phy *phy)
2183{
2184 if (phy->timer.function)
2185 del_timer(&phy->timer);
2186 phy->timer.function = NULL;
2187}
2188
2189void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) 2100void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2190{ 2101{
2191 u32 tmp; 2102 u32 tmp;
2192 struct sas_ha_struct *sas_ha = mvi->sas;
2193 struct mvs_phy *phy = &mvi->phy[phy_no]; 2103 struct mvs_phy *phy = &mvi->phy[phy_no];
2194 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2195 2104
2196 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); 2105 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2197 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, 2106 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2107 mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2198 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); 2108 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2199 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, 2109 mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
2200 phy->irq_status); 2110 phy->irq_status);
2201 2111
2202 /* 2112 /*
@@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2205 */ 2115 */
2206 2116
2207 if (phy->irq_status & PHYEV_DCDR_ERR) { 2117 if (phy->irq_status & PHYEV_DCDR_ERR) {
2208 mv_dprintk("port %d STP decoding error.\n", 2118 mv_dprintk("phy %d STP decoding error.\n",
2209 phy_no + mvi->id*mvi->chip->n_phy); 2119 phy_no + mvi->id*mvi->chip->n_phy);
2210 } 2120 }
2211 2121
2212 if (phy->irq_status & PHYEV_POOF) { 2122 if (phy->irq_status & PHYEV_POOF) {
2123 mdelay(500);
2213 if (!(phy->phy_event & PHY_PLUG_OUT)) { 2124 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2214 int dev_sata = phy->phy_type & PORT_TYPE_SATA; 2125 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2215 int ready; 2126 int ready;
@@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2220 (void *)(unsigned long)phy_no, 2131 (void *)(unsigned long)phy_no,
2221 PHY_PLUG_EVENT); 2132 PHY_PLUG_EVENT);
2222 ready = mvs_is_phy_ready(mvi, phy_no); 2133 ready = mvs_is_phy_ready(mvi, phy_no);
2223 if (!ready)
2224 mv_dprintk("phy%d Unplug Notice\n",
2225 phy_no +
2226 mvi->id * mvi->chip->n_phy);
2227 if (ready || dev_sata) { 2134 if (ready || dev_sata) {
2228 if (MVS_CHIP_DISP->stp_reset) 2135 if (MVS_CHIP_DISP->stp_reset)
2229 MVS_CHIP_DISP->stp_reset(mvi, 2136 MVS_CHIP_DISP->stp_reset(mvi,
2230 phy_no); 2137 phy_no);
2231 else 2138 else
2232 MVS_CHIP_DISP->phy_reset(mvi, 2139 MVS_CHIP_DISP->phy_reset(mvi,
2233 phy_no, 0); 2140 phy_no, MVS_SOFT_RESET);
2234 return; 2141 return;
2235 } 2142 }
2236 } 2143 }
@@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2243 if (phy->timer.function == NULL) { 2150 if (phy->timer.function == NULL) {
2244 phy->timer.data = (unsigned long)phy; 2151 phy->timer.data = (unsigned long)phy;
2245 phy->timer.function = mvs_sig_time_out; 2152 phy->timer.function = mvs_sig_time_out;
2246 phy->timer.expires = jiffies + 10*HZ; 2153 phy->timer.expires = jiffies + 5*HZ;
2247 add_timer(&phy->timer); 2154 add_timer(&phy->timer);
2248 } 2155 }
2249 } 2156 }
2250 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { 2157 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2251 phy->phy_status = mvs_is_phy_ready(mvi, phy_no); 2158 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2252 mvs_sig_remove_timer(phy);
2253 mv_dprintk("notify plug in on phy[%d]\n", phy_no); 2159 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2254 if (phy->phy_status) { 2160 if (phy->phy_status) {
2255 mdelay(10); 2161 mdelay(10);
@@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2263 } 2169 }
2264 mvs_update_phyinfo(mvi, phy_no, 0); 2170 mvs_update_phyinfo(mvi, phy_no, 0);
2265 if (phy->phy_type & PORT_TYPE_SAS) { 2171 if (phy->phy_type & PORT_TYPE_SAS) {
2266 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2); 2172 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
2267 mdelay(10); 2173 mdelay(10);
2268 } 2174 }
2269 2175
2270 mvs_bytes_dmaed(mvi, phy_no); 2176 mvs_bytes_dmaed(mvi, phy_no);
2271 /* whether driver is going to handle hot plug */ 2177 /* whether driver is going to handle hot plug */
2272 if (phy->phy_event & PHY_PLUG_OUT) { 2178 if (phy->phy_event & PHY_PLUG_OUT) {
2273 mvs_port_notify_formed(sas_phy, 0); 2179 mvs_port_notify_formed(&phy->sas_phy, 0);
2274 phy->phy_event &= ~PHY_PLUG_OUT; 2180 phy->phy_event &= ~PHY_PLUG_OUT;
2275 } 2181 }
2276 } else { 2182 } else {
@@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2278 phy_no + mvi->id*mvi->chip->n_phy); 2184 phy_no + mvi->id*mvi->chip->n_phy);
2279 } 2185 }
2280 } else if (phy->irq_status & PHYEV_BROAD_CH) { 2186 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2281 mv_dprintk("port %d broadcast change.\n", 2187 mv_dprintk("phy %d broadcast change.\n",
2282 phy_no + mvi->id*mvi->chip->n_phy); 2188 phy_no + mvi->id*mvi->chip->n_phy);
2283 /* exception for Samsung disk drive*/ 2189 mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
2284 mdelay(1000); 2190 EXP_BRCT_CHG);
2285 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2286 } 2191 }
2287 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
2288} 2192}
2289 2193
2290int mvs_int_rx(struct mvs_info *mvi, bool self_clear) 2194int mvs_int_rx(struct mvs_info *mvi, bool self_clear)