author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-23 15:29:16 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-23 15:29:16 -0500
commit     b23c9cc0ce652089a2f0af8c7f1541f10dc9b5db (patch)
tree       d2e429bbb74c268c071790761e4a2b59d96ea3f7 /drivers
parent     1e8352784abaedb424e63fa700e93e6c1307785f (diff)
parent     cfb37ae1e9d31fe2c1d21734ab51405e0c3afb7e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6: (25 commits)
[SCSI] qlogicpti: section fixes
[SCSI] mvsas: convert from rough draft to working driver
[SCSI] mvsas: Add Marvell 6440 SAS/SATA driver
[SCSI] libsas: correctly flush the LU queue on error recovery
[SCSI] aic94xx: fix sequencer hang on error recovery
[SCSI] st: compile fix when DEBUG set to one
[SCSI] stex: stex_internal_copy should be called with sg_count in struct st_ccb
[SCSI] stex: stex_direct_copy shouldn't call dma_map_sg
[SCSI] lpfc: Balance locking
[SCSI] qla4xxx: fix up residual handling
[SCSI] libsas: fix error handling
[SCSI] arcmsr: fix message allocation
[SCSI] mptbase: fix use-after-free's
[SCSI] iscsi transport: make 2 functions static
[SCSI] lpfc: make lpfc_disable_node() static
[SCSI] ips: fix data buffer accessors conversion bug
[SCSI] gdth: don't call pci_free_consistent under spinlock
[SCSI] qla2xxx: fix compile warning for printk format
[SCSI] aic7xxx: mitigate HOST_MSG_LOOP invalid SCB ff panic
[SCSI] scsi_debug: disable clustering
...
Diffstat (limited to 'drivers')
26 files changed, 3184 insertions(+), 183 deletions(-)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index bfda731696f7..0c303c84b37b 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1481,15 +1481,15 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
 	if (pci_enable_device_mem(pdev)) {
-		kfree(ioc);
 		printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
 		       "failed\n", ioc->name);
+		kfree(ioc);
 		return r;
 	}
 	if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
-		kfree(ioc);
 		printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
 		       "MEM failed\n", ioc->name);
+		kfree(ioc);
 		return r;
 	}
 
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d83ea96fe135..caadc68c3000 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -923,7 +923,7 @@ extern struct proc_dir_entry *mpt_proc_root_dir;
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 #endif /* } __KERNEL__ */
 
-#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) || defined(__x86_64__) || defined(__powerpc__)
+#ifdef CONFIG_64BIT
 #define CAST_U32_TO_PTR(x)	((void *)(u64)x)
 #define CAST_PTR_TO_U32(x)	((u32)(u64)x)
 #else
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a7a0813b24cb..c46666a24809 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -992,6 +992,16 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
+config SCSI_MVSAS
+	tristate "Marvell 88SE6440 SAS/SATA support"
+	depends on PCI && SCSI
+	select SCSI_SAS_LIBSAS
+	help
+	  This driver supports Marvell SAS/SATA PCI devices.
+
+	  To compiler this driver as a module, choose M here: the module
+	  will be called mvsas.
+
 config SCSI_NCR53C406A
 	tristate "NCR53c406a SCSI support"
 	depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 925c26b4fff9..23e6ecbd4778 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
+obj-$(CONFIG_SCSI_MVSAS)	+= mvsas.o
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 
 obj-$(CONFIG_ARM)		+= arm/
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 6d2ae641273c..64e62ce59c15 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -695,15 +695,16 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
 		scb_index = ahc_inb(ahc, SCB_TAG);
 		scb = ahc_lookup_scb(ahc, scb_index);
 		if (devinfo.role == ROLE_INITIATOR) {
-			if (scb == NULL)
-				panic("HOST_MSG_LOOP with "
-				      "invalid SCB %x\n", scb_index);
+			if (bus_phase == P_MESGOUT) {
+				if (scb == NULL)
+					panic("HOST_MSG_LOOP with "
+					      "invalid SCB %x\n",
+					      scb_index);
 
-			if (bus_phase == P_MESGOUT)
 				ahc_setup_initiator_msgout(ahc,
 							   &devinfo,
 							   scb);
-			else {
+			} else {
 				ahc->msg_type =
 				    MSG_TYPE_INITIATOR_MSGIN;
 				ahc->msgin_index = 0;
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 0febad4dd75f..ab350504ca5a 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -458,13 +458,19 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
 		tc_abort = le16_to_cpu(tc_abort);
 
 		list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
-			struct sas_task *task = ascb->uldd_task;
+			struct sas_task *task = a->uldd_task;
+
+			if (a->tc_index != tc_abort)
+				continue;
 
-			if (task && a->tc_index == tc_abort) {
+			if (task) {
 				failed_dev = task->dev;
 				sas_task_abort(task);
-				break;
+			} else {
+				ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
+					    a->scb->header.opcode);
 			}
+			break;
 		}
 
 		if (!failed_dev) {
@@ -478,7 +484,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
 		 * that the EH will wake up and do something.
 		 */
 		list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
-			struct sas_task *task = ascb->uldd_task;
+			struct sas_task *task = a->uldd_task;
 
 			if (task &&
 			    task->dev == failed_dev &&
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index b52124f3d3ac..144f5ad20453 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -151,8 +151,6 @@ static int asd_clear_nexus_I_T(struct domain_device *dev)
 	CLEAR_NEXUS_PRE;
 	scb->clear_nexus.nexus = NEXUS_I_T;
 	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
-	if (dev->tproto)
-		scb->clear_nexus.flags |= SUSPEND_TX;
 	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
 						   dev->lldd_dev);
 	CLEAR_NEXUS_POST;
@@ -169,8 +167,6 @@ static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
 	CLEAR_NEXUS_PRE;
 	scb->clear_nexus.nexus = NEXUS_I_T_L;
 	scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
-	if (dev->tproto)
-		scb->clear_nexus.flags |= SUSPEND_TX;
 	memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
 	scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
 						   dev->lldd_dev);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f9ff32cfed0..f91f79c8007d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1387,18 +1387,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
 	switch(controlcode) {
 
 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
-		unsigned long *ver_addr;
+		unsigned char *ver_addr;
 		uint8_t *pQbuffer, *ptmpQbuffer;
 		int32_t allxfer_len = 0;
-		void *tmp;
 
-		tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
-		ver_addr = (unsigned long *)tmp;
-		if (!tmp) {
+		ver_addr = kmalloc(1032, GFP_ATOMIC);
+		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
-		ptmpQbuffer = (uint8_t *) ver_addr;
+		ptmpQbuffer = ver_addr;
 		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
 			&& (allxfer_len < 1031)) {
 			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
@@ -1427,26 +1425,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
 			}
 			arcmsr_iop_message_read(acb);
 		}
-		memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
+		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
 		pcmdmessagefld->cmdmessage.Length = allxfer_len;
 		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
-		kfree(tmp);
+		kfree(ver_addr);
 		}
 		break;
 
 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
-		unsigned long *ver_addr;
+		unsigned char *ver_addr;
 		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
 		uint8_t *pQbuffer, *ptmpuserbuffer;
-		void *tmp;
 
-		tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
-		ver_addr = (unsigned long *)tmp;
-		if (!tmp) {
+		ver_addr = kmalloc(1032, GFP_ATOMIC);
+		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
-		ptmpuserbuffer = (uint8_t *)ver_addr;
+		ptmpuserbuffer = ver_addr;
 		user_len = pcmdmessagefld->cmdmessage.Length;
 		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
 		wqbuf_lastindex = acb->wqbuf_lastindex;
@@ -1492,7 +1488,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
 			retvalue = ARCMSR_MESSAGE_FAIL;
 		}
 		}
-		kfree(tmp);
+		kfree(ver_addr);
 		}
 		break;
 
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 3e73e264972e..b65f4cf0eec9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -313,7 +313,7 @@ typedef struct {
 
     /* miscellaneous */
     int			internal_done;	/* flag to indicate request done */
-    struct scsi_eh_save *ses;		/* holds request sense restore info */
+    struct scsi_eh_save ses;		/* holds request sense restore info */
     unsigned long	magic_end;
 } FAS216_Info;
 
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index de5773443c62..ce0228e26aec 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -694,15 +694,13 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
 {
     ulong flags;
 
-    spin_lock_irqsave(&ha->smp_lock, flags);
-
     if (buf == ha->pscratch) {
+        spin_lock_irqsave(&ha->smp_lock, flags);
         ha->scratch_busy = FALSE;
+        spin_unlock_irqrestore(&ha->smp_lock, flags);
     } else {
         pci_free_consistent(ha->pdev, size, buf, paddr);
     }
-
-    spin_unlock_irqrestore(&ha->smp_lock, flags);
 }
 
 #ifdef GDTH_IOCTL_PROC
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bb152fb9fec7..7ed568f180ae 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1576,7 +1576,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
 	METHOD_TRACE("ips_make_passthru", 1);
 
 	scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
-		length += sg[i].length;
+		length += sg->length;
 
 	if (length < sizeof (ips_passthru_t)) {
 		/* wrong size */
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f869fba86807..704ea06a6e50 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -51,10 +51,14 @@ static void sas_scsi_task_done(struct sas_task *task)
 {
 	struct task_status_struct *ts = &task->task_status;
 	struct scsi_cmnd *sc = task->uldd_task;
-	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
-	unsigned ts_flags = task->task_state_flags;
 	int hs = 0, stat = 0;
 
+	if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+		/* Aborted tasks will be completed by the error handler */
+		SAS_DPRINTK("task done but aborted\n");
+		return;
+	}
+
 	if (unlikely(!sc)) {
 		SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
 		list_del_init(&task->list);
@@ -120,11 +124,7 @@ static void sas_scsi_task_done(struct sas_task *task)
 	sc->result = (hs << 16) | stat;
 	list_del_init(&task->list);
 	sas_free_task(task);
-	/* This is very ugly but this is how SCSI Core works. */
-	if (ts_flags & SAS_TASK_STATE_ABORTED)
-		scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
-	else
-		sc->scsi_done(sc);
+	sc->scsi_done(sc);
 }
 
 static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
@@ -255,13 +255,34 @@ out:
 	return res;
 }
 
+static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
+{
+	struct sas_task *task = TO_SAS_TASK(cmd);
+	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+
+	/* remove the aborted task flag to allow the task to be
+	 * completed now. At this point, we only get called following
+	 * an actual abort of the task, so we should be guaranteed not
+	 * to be racing with any completions from the LLD (hence we
+	 * don't need the task state lock to clear the flag) */
+	task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
+	/* Now call task_done. However, task will be free'd after
+	 * this */
+	task->task_done(task);
+	/* now finish the command and move it on to the error
+	 * handler done list, this also takes it off the
+	 * error handler pending list */
+	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
+}
+
 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 {
 	struct scsi_cmnd *cmd, *n;
 
 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
-		if (cmd == my_cmd)
-			list_del_init(&cmd->eh_entry);
+		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
+		    cmd->device->lun == my_cmd->device->lun)
+			sas_eh_finish_cmd(cmd);
 	}
 }
 
@@ -274,7 +295,7 @@ static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
 		struct domain_device *x = cmd_to_domain_dev(cmd);
 
 		if (x == dev)
-			list_del_init(&cmd->eh_entry);
+			sas_eh_finish_cmd(cmd);
 	}
 }
 
@@ -288,7 +309,7 @@ static void sas_scsi_clear_queue_port(struct list_head *error_q,
 		struct asd_sas_port *x = dev->port;
 
 		if (x == port)
-			list_del_init(&cmd->eh_entry);
+			sas_eh_finish_cmd(cmd);
 	}
 }
 
@@ -528,14 +549,14 @@ Again:
 		case TASK_IS_DONE:
 			SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
 				    task);
-			task->task_done(task);
+			sas_eh_finish_cmd(cmd);
 			if (need_reset)
 				try_to_reset_cmd_device(shost, cmd);
 			continue;
 		case TASK_IS_ABORTED:
 			SAS_DPRINTK("%s: task 0x%p is aborted\n",
 				    __FUNCTION__, task);
-			task->task_done(task);
+			sas_eh_finish_cmd(cmd);
 			if (need_reset)
 				try_to_reset_cmd_device(shost, cmd);
 			continue;
@@ -547,7 +568,7 @@ Again:
 					    "recovered\n",
 					    SAS_ADDR(task->dev),
 					    cmd->device->lun);
-				task->task_done(task);
+				sas_eh_finish_cmd(cmd);
 				if (need_reset)
 					try_to_reset_cmd_device(shost, cmd);
 				sas_scsi_clear_queue_lu(work_q, cmd);
@@ -562,7 +583,7 @@ Again:
 			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
 				SAS_DPRINTK("I_T %016llx recovered\n",
 					    SAS_ADDR(task->dev->sas_addr));
-				task->task_done(task);
+				sas_eh_finish_cmd(cmd);
 				if (need_reset)
 					try_to_reset_cmd_device(shost, cmd);
 				sas_scsi_clear_queue_I_T(work_q, task->dev);
@@ -577,7 +598,7 @@ Again:
 				if (res == TMF_RESP_FUNC_COMPLETE) {
 					SAS_DPRINTK("clear nexus port:%d "
 						    "succeeded\n", port->id);
-					task->task_done(task);
+					sas_eh_finish_cmd(cmd);
 					if (need_reset)
 						try_to_reset_cmd_device(shost, cmd);
 					sas_scsi_clear_queue_port(work_q,
@@ -591,10 +612,10 @@ Again:
 				if (res == TMF_RESP_FUNC_COMPLETE) {
 					SAS_DPRINTK("clear nexus ha "
 						    "succeeded\n");
-					task->task_done(task);
+					sas_eh_finish_cmd(cmd);
 					if (need_reset)
 						try_to_reset_cmd_device(shost, cmd);
-					goto out;
+					goto clear_q;
 				}
 			}
 			/* If we are here -- this means that no amount
@@ -606,21 +627,18 @@ Again:
 			    SAS_ADDR(task->dev->sas_addr),
 			    cmd->device->lun);
 
-			task->task_done(task);
+			sas_eh_finish_cmd(cmd);
 			if (need_reset)
 				try_to_reset_cmd_device(shost, cmd);
 			goto clear_q;
 		}
 	}
-out:
 	return list_empty(work_q);
 clear_q:
 	SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
-	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
-		struct sas_task *task = TO_SAS_TASK(cmd);
-		list_del_init(&cmd->eh_entry);
-		task->task_done(task);
-	}
+	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
+		sas_eh_finish_cmd(cmd);
+
 	return list_empty(work_q);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 848d97744b4d..0819f5f39de5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,7 +55,6 @@ void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
-void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
 			struct lpfc_nodelist *, int);
 void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bd572d6b60af..976653440fba 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1694,7 +1694,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 				NLP_STE_UNUSED_NODE);
 }
 
-void
+static void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f53206411cd8..fc0d9501aba6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -648,28 +648,24 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
 	unsigned long flags;
 	struct hbq_dmabuf *hbq_buffer;
 
-	if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
+	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
 		return 0;
-	}
 
 	start = phba->hbqs[hbqno].buffer_count;
 	end = count + start;
-	if (end > lpfc_hbq_defs[hbqno]->entry_count) {
+	if (end > lpfc_hbq_defs[hbqno]->entry_count)
 		end = lpfc_hbq_defs[hbqno]->entry_count;
-	}
 
 	/* Check whether HBQ is still in use */
 	spin_lock_irqsave(&phba->hbalock, flags);
-	if (!phba->hbq_in_use) {
-		spin_unlock_irqrestore(&phba->hbalock, flags);
-		return 0;
-	}
+	if (!phba->hbq_in_use)
+		goto out;
 
 	/* Populate HBQ entries */
 	for (i = start; i < end; i++) {
 		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
 		if (!hbq_buffer)
-			return 1;
+			goto err;
 		hbq_buffer->tag = (i | (hbqno << 16));
 		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
 			phba->hbqs[hbqno].buffer_count++;
@@ -677,8 +673,12 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
 	}
 
+ out:
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 	return 0;
+ err:
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+	return 1;
 }
 
 int
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4d59ae8491a4..b135a1ed4b2c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -151,19 +151,19 @@ mega_setup_mailbox(adapter_t *adapter)
 	 */
 	if( adapter->flag & BOARD_IOMAP ) {
 
-		outb_p(adapter->mbox_dma & 0xFF,
+		outb(adapter->mbox_dma & 0xFF,
 				adapter->host->io_port + MBOX_PORT0);
 
-		outb_p((adapter->mbox_dma >> 8) & 0xFF,
+		outb((adapter->mbox_dma >> 8) & 0xFF,
 				adapter->host->io_port + MBOX_PORT1);
 
-		outb_p((adapter->mbox_dma >> 16) & 0xFF,
+		outb((adapter->mbox_dma >> 16) & 0xFF,
 				adapter->host->io_port + MBOX_PORT2);
 
-		outb_p((adapter->mbox_dma >> 24) & 0xFF,
+		outb((adapter->mbox_dma >> 24) & 0xFF,
 				adapter->host->io_port + MBOX_PORT3);
 
-		outb_p(ENABLE_MBOX_BYTE,
+		outb(ENABLE_MBOX_BYTE,
 				adapter->host->io_port + ENABLE_MBOX_REGION);
 
 		irq_ack(adapter);
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
new file mode 100755
index 000000000000..30e20e69715a
--- /dev/null
+++ b/drivers/scsi/mvsas.c
@@ -0,0 +1,2981 @@
1 | /* | ||
2 | mvsas.c - Marvell 88SE6440 SAS/SATA support | ||
3 | |||
4 | Copyright 2007 Red Hat, Inc. | ||
5 | Copyright 2008 Marvell. <kewei@marvell.com> | ||
6 | |||
7 | This program is free software; you can redistribute it and/or | ||
8 | modify it under the terms of the GNU General Public License as | ||
9 | published by the Free Software Foundation; either version 2, | ||
10 | or (at your option) any later version. | ||
11 | |||
12 | This program is distributed in the hope that it will be useful, | ||
13 | but WITHOUT ANY WARRANTY; without even the implied warranty | ||
14 | of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
15 | See the GNU General Public License for more details. | ||
16 | |||
17 | You should have received a copy of the GNU General Public | ||
18 | License along with this program; see the file COPYING. If not, | ||
19 | write to the Free Software Foundation, 675 Mass Ave, Cambridge, | ||
20 | MA 02139, USA. | ||
21 | |||
22 | --------------------------------------------------------------- | ||
23 | |||
24 | Random notes: | ||
25 | * hardware supports controlling the endian-ness of data | ||
26 | structures. this permits elimination of all the le32_to_cpu() | ||
27 | and cpu_to_le32() conversions. | ||
28 | |||
29 | */ | ||
30 | |||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/pci.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <scsi/libsas.h> | ||
39 | #include <asm/io.h> | ||
40 | |||
41 | #define DRV_NAME "mvsas" | ||
42 | #define DRV_VERSION "0.5" | ||
43 | #define _MV_DUMP 0 | ||
44 | #define MVS_DISABLE_NVRAM | ||
45 | #define MVS_DISABLE_MSI | ||
46 | |||
47 | #define mr32(reg) readl(regs + MVS_##reg) | ||
48 | #define mw32(reg,val) writel((val), regs + MVS_##reg) | ||
49 | #define mw32_f(reg,val) do { \ | ||
50 | writel((val), regs + MVS_##reg); \ | ||
51 | readl(regs + MVS_##reg); \ | ||
52 | } while (0) | ||
53 | |||
54 | #define MVS_ID_NOT_MAPPED 0xff | ||
55 | #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) | ||
56 | |||
57 | /* offset for D2H FIS in the Received FIS List Structure */ | ||
58 | #define SATA_RECEIVED_D2H_FIS(reg_set) \ | ||
59 | ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) | ||
60 | #define SATA_RECEIVED_PIO_FIS(reg_set) \ | ||
61 | ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) | ||
62 | #define UNASSOC_D2H_FIS(id) \ | ||
63 | ((void *) mvi->rx_fis + 0x100 * id) | ||
64 | |||
65 | #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ | ||
66 | for ((__mc) = (__lseq_mask), (__lseq) = 0; \ | ||
67 | (__mc) != 0 && __rest; \ | ||
68 | (++__lseq), (__mc) >>= 1) | ||
69 | |||
70 | /* driver compile-time configuration */ | ||
71 | enum driver_configuration { | ||
72 | MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ | ||
73 | MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ | ||
74 | /* software requires power-of-2 | ||
75 | ring size */ | ||
76 | |||
77 | MVS_SLOTS = 512, /* command slots */ | ||
78 | MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ | ||
79 | MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ | ||
80 | MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ | ||
81 | MVS_OAF_SZ = 64, /* Open address frame buffer size */ | ||
82 | |||
83 | MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ | ||
84 | |||
85 | MVS_QUEUE_SIZE = 30, /* Support Queue depth */ | ||
86 | }; | ||
87 | |||
88 | /* unchangeable hardware details */ | ||
89 | enum hardware_details { | ||
90 | MVS_MAX_PHYS = 8, /* max. possible phys */ | ||
91 | MVS_MAX_PORTS = 8, /* max. possible ports */ | ||
92 | MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), | ||
93 | }; | ||
94 | |||
95 | /* peripheral registers (BAR2) */ | ||
96 | enum peripheral_registers { | ||
97 | SPI_CTL = 0x10, /* EEPROM control */ | ||
98 | SPI_CMD = 0x14, /* EEPROM command */ | ||
99 | SPI_DATA = 0x18, /* EEPROM data */ | ||
100 | }; | ||
101 | |||
102 | enum peripheral_register_bits { | ||
103 | TWSI_RDY = (1U << 7), /* EEPROM interface ready */ | ||
104 | TWSI_RD = (1U << 4), /* EEPROM read access */ | ||
105 | |||
106 | SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ | ||
107 | }; | ||
108 | |||
109 | /* enhanced mode registers (BAR4) */ | ||
110 | enum hw_registers { | ||
111 | MVS_GBL_CTL = 0x04, /* global control */ | ||
112 | MVS_GBL_INT_STAT = 0x08, /* global irq status */ | ||
113 | MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ | ||
114 | MVS_GBL_PORT_TYPE = 0xa0, /* port type */ | ||
115 | |||
116 | MVS_CTL = 0x100, /* SAS/SATA port configuration */ | ||
117 | MVS_PCS = 0x104, /* SAS/SATA port control/status */ | ||
118 | MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ | ||
119 | MVS_CMD_LIST_HI = 0x10C, | ||
120 | MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ | ||
121 | MVS_RX_FIS_HI = 0x114, | ||
122 | |||
123 | MVS_TX_CFG = 0x120, /* TX configuration */ | ||
124 | MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ | ||
125 | MVS_TX_HI = 0x128, | ||
126 | |||
127 | MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ | ||
128 | MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ | ||
129 | MVS_RX_CFG = 0x134, /* RX configuration */ | ||
130 | MVS_RX_LO = 0x138, /* RX (completion) ring addr */ | ||
131 | MVS_RX_HI = 0x13C, | ||
132 | MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ | ||
133 | |||
134 | MVS_INT_COAL = 0x148, /* Int coalescing config */ | ||
135 | MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ | ||
136 | MVS_INT_STAT = 0x150, /* Central int status */ | ||
137 | MVS_INT_MASK = 0x154, /* Central int enable */ | ||
138 | MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ | ||
139 | MVS_INT_MASK_SRS = 0x15C, | ||
140 | |||
141 | /* ports 1-3 follow after this */ | ||
142 | MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ | ||
143 | MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ | ||
144 | MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ | ||
145 | MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ | ||
146 | |||
147 | /* ports 1-3 follow after this */ | ||
148 | MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ | ||
149 | MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ | ||
150 | |||
151 | MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ | ||
152 | MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ | ||
153 | |||
154 | /* ports 1-3 follow after this */ | ||
155 | MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ | ||
156 | MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ | ||
157 | MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ | ||
158 | MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ | ||
159 | |||
160 | /* ports 1-3 follow after this */ | ||
161 | MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ | ||
162 | MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ | ||
163 | MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ | ||
164 | MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ | ||
165 | }; | ||
166 | |||
167 | enum hw_register_bits { | ||
168 | /* MVS_GBL_CTL */ | ||
169 | INT_EN = (1U << 1), /* Global int enable */ | ||
170 | HBA_RST = (1U << 0), /* HBA reset */ | ||
171 | |||
172 | /* MVS_GBL_INT_STAT */ | ||
173 | INT_XOR = (1U << 4), /* XOR engine event */ | ||
174 | INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ | ||
175 | |||
176 | /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ | ||
177 | SATA_TARGET = (1U << 16), /* port0 SATA target enable */ | ||
178 | MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ | ||
179 | MODE_AUTO_DET_PORT6 = (1U << 14), | ||
180 | MODE_AUTO_DET_PORT5 = (1U << 13), | ||
181 | MODE_AUTO_DET_PORT4 = (1U << 12), | ||
182 | MODE_AUTO_DET_PORT3 = (1U << 11), | ||
183 | MODE_AUTO_DET_PORT2 = (1U << 10), | ||
184 | MODE_AUTO_DET_PORT1 = (1U << 9), | ||
185 | MODE_AUTO_DET_PORT0 = (1U << 8), | ||
186 | MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | | ||
187 | MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | | ||
188 | MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 | | ||
189 | MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, | ||
190 | MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ | ||
191 | MODE_SAS_PORT6_MASK = (1U << 6), | ||
192 | MODE_SAS_PORT5_MASK = (1U << 5), | ||
193 | MODE_SAS_PORT4_MASK = (1U << 4), | ||
194 | MODE_SAS_PORT3_MASK = (1U << 3), | ||
195 | MODE_SAS_PORT2_MASK = (1U << 2), | ||
196 | MODE_SAS_PORT1_MASK = (1U << 1), | ||
197 | MODE_SAS_PORT0_MASK = (1U << 0), | ||
198 | MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | | ||
199 | MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | | ||
200 | MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | | ||
201 | MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, | ||
202 | |||
203 | /* SAS_MODE value may be | ||
204 | * dictated (in hw) by values | ||
205 | * of SATA_TARGET & AUTO_DET | ||
206 | */ | ||
207 | |||
208 | /* MVS_TX_CFG */ | ||
209 | TX_EN = (1U << 16), /* Enable TX */ | ||
210 | TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ | ||
211 | |||
212 | /* MVS_RX_CFG */ | ||
213 | RX_EN = (1U << 16), /* Enable RX */ | ||
214 | RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ | ||
215 | |||
216 | /* MVS_INT_COAL */ | ||
217 | COAL_EN = (1U << 16), /* Enable int coalescing */ | ||
218 | |||
219 | /* MVS_INT_STAT, MVS_INT_MASK */ | ||
220 | CINT_I2C = (1U << 31), /* I2C event */ | ||
221 | CINT_SW0 = (1U << 30), /* software event 0 */ | ||
222 | CINT_SW1 = (1U << 29), /* software event 1 */ | ||
223 | CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ | ||
224 | CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ | ||
225 | CINT_MEM = (1U << 26), /* int mem parity err */ | ||
226 | CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ | ||
227 | CINT_SRS = (1U << 3), /* SRS event */ | ||
228 | CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ | ||
229 | CINT_DONE = (1U << 0), /* cmd completion */ | ||
230 | |||
231 | /* shl for ports 1-3 */ | ||
232 | CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ | ||
233 | CINT_PORT = (1U << 8), /* port0 event */ | ||
234 | CINT_PORT_MASK_OFFSET = 8, | ||
235 | CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), | ||
236 | |||
237 | /* TX (delivery) ring bits */ | ||
238 | TXQ_CMD_SHIFT = 29, | ||
239 | TXQ_CMD_SSP = 1, /* SSP protocol */ | ||
240 | TXQ_CMD_SMP = 2, /* SMP protocol */ | ||
241 | TXQ_CMD_STP = 3, /* STP/SATA protocol */ | ||
242 | TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ | ||
243 | TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ | ||
244 | TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ | ||
245 | TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ | ||
246 | TXQ_SRS_SHIFT = 20, /* SATA register set */ | ||
247 | TXQ_SRS_MASK = 0x7f, | ||
248 | TXQ_PHY_SHIFT = 12, /* PHY bitmap */ | ||
249 | TXQ_PHY_MASK = 0xff, | ||
250 | TXQ_SLOT_MASK = 0xfff, /* slot number */ | ||
251 | |||
252 | /* RX (completion) ring bits */ | ||
253 | RXQ_GOOD = (1U << 23), /* Response good */ | ||
254 | RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ | ||
255 | RXQ_CMD_RX = (1U << 20), /* target cmd received */ | ||
256 | RXQ_ATTN = (1U << 19), /* attention */ | ||
257 | RXQ_RSP = (1U << 18), /* response frame xfer'd */ | ||
258 | RXQ_ERR = (1U << 17), /* err info rec xfer'd */ | ||
259 | RXQ_DONE = (1U << 16), /* cmd complete */ | ||
260 | RXQ_SLOT_MASK = 0xfff, /* slot number */ | ||
261 | |||
262 | /* mvs_cmd_hdr bits */ | ||
263 | MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ | ||
264 | MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ | ||
265 | |||
266 | /* SSP initiator only */ | ||
267 | MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ | ||
268 | |||
269 | /* SSP initiator or target */ | ||
270 | MCH_SSP_FR_TASK = 0x1, /* TASK frame */ | ||
271 | |||
272 | /* SSP target only */ | ||
273 | MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ | ||
274 | MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ | ||
275 | MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ | ||
276 | MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ | ||
277 | |||
278 | MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ | ||
279 | MCH_FBURST = (1U << 11), /* first burst (SSP) */ | ||
280 | MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ | ||
281 | MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ | ||
282 | MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ | ||
283 | MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ | ||
284 | MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */ | ||
285 | MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ | ||
286 | MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ | ||
287 | MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ | ||
288 | |||
289 | CCTL_RST = (1U << 5), /* port logic reset */ | ||
290 | |||
291 | /* 0(LSB first), 1(MSB first) */ | ||
292 | CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ | ||
293 | CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ | ||
294 | CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ | ||
295 | CCTL_ENDIAN_CMD = (1U << 0), /* command table */ | ||
296 | |||
297 | /* MVS_Px_SER_CTLSTAT (per-phy control) */ | ||
298 | PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ | ||
299 | PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ | ||
300 | PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ | ||
301 | PHY_RST = (1U << 0), /* phy reset */ | ||
302 | PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), | ||
303 | PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), | ||
304 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), | ||
305 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK = | ||
306 | (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), | ||
307 | PHY_READY_MASK = (1U << 20), | ||
308 | |||
309 | /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ | ||
310 | PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ | ||
311 | PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ | ||
312 | PHYEV_AN = (1U << 18), /* SATA async notification */ | ||
313 | PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ | ||
314 | PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ | ||
315 | PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ | ||
316 | PHYEV_IU_BIG = (1U << 11), /* IU too long err */ | ||
317 | PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ | ||
318 | PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ | ||
319 | PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ | ||
320 | PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ | ||
321 | PHYEV_PORT_SEL = (1U << 6), /* port selector present */ | ||
322 | PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ | ||
323 | PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ | ||
324 | PHYEV_ID_FAIL = (1U << 3), /* identify failed */ | ||
325 | PHYEV_ID_DONE = (1U << 2), /* identify done */ | ||
326 | PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ | ||
327 | PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ | ||
328 | |||
329 | /* MVS_PCS */ | ||
330 | PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ | ||
331 | PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ | ||
332 | PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ | ||
333 | PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ | ||
334 | PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ | ||
335 | PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ | ||
336 | PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ | ||
337 | PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ | ||
338 | PCS_CMD_RST = (1U << 1), /* reset cmd issue */ | ||
339 | PCS_CMD_EN = (1U << 0), /* enable cmd issue */ | ||
340 | |||
341 | /* Port n Attached Device Info */ | ||
342 | PORT_DEV_SSP_TRGT = (1U << 19), | ||
343 | PORT_DEV_SMP_TRGT = (1U << 18), | ||
344 | PORT_DEV_STP_TRGT = (1U << 17), | ||
345 | PORT_DEV_SSP_INIT = (1U << 11), | ||
346 | PORT_DEV_SMP_INIT = (1U << 10), | ||
347 | PORT_DEV_STP_INIT = (1U << 9), | ||
348 | PORT_PHY_ID_MASK = (0xFFU << 24), | ||
349 | PORT_DEV_TRGT_MASK = (0x7U << 17), | ||
350 | PORT_DEV_INIT_MASK = (0x7U << 9), | ||
351 | PORT_DEV_TYPE_MASK = (0x7U << 0), | ||
352 | |||
353 | /* Port n PHY Status */ | ||
354 | PHY_RDY = (1U << 2), | ||
355 | PHY_DW_SYNC = (1U << 1), | ||
356 | PHY_OOB_DTCTD = (1U << 0), | ||
357 | |||
358 | /* VSR */ | ||
359 | /* PHYMODE 6 (CDB) */ | ||
360 | PHY_MODE6_DTL_SPEED = (1U << 27), | ||
361 | }; | ||
362 | |||
363 | enum mvs_info_flags { | ||
364 | MVF_MSI = (1U << 0), /* MSI is enabled */ | ||
365 | MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ | ||
366 | }; | ||
367 | |||
368 | enum sas_cmd_port_registers { | ||
369 | CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ | ||
370 | CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ | ||
371 | CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ | ||
372 | CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ | ||
373 | CMD_OOB_SPACE = 0x110, /* OOB space control register */ | ||
374 | CMD_OOB_BURST = 0x114, /* OOB burst control register */ | ||
375 | CMD_PHY_TIMER = 0x118, /* PHY timer control register */ | ||
376 | CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ | ||
377 | CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ | ||
378 | CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ | ||
379 | CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ | ||
380 | CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ | ||
381 | CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ | ||
382 | CMD_ID_TEST = 0x134, /* ID test register */ | ||
383 | CMD_PL_TIMER = 0x138, /* PL timer register */ | ||
384 | CMD_WD_TIMER = 0x13c, /* WD timer register */ | ||
385 | CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ | ||
386 | CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ | ||
387 | CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ | ||
388 | CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ | ||
389 | CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ | ||
390 | CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ | ||
391 | CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ | ||
392 | CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ | ||
393 | CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ | ||
394 | CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ | ||
395 | CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ | ||
396 | CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ | ||
397 | CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ | ||
398 | CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ | ||
399 | CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ | ||
400 | CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ | ||
401 | CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ | ||
402 | CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ | ||
403 | CMD_RESET_COUNT = 0x188, /* Reset Count */ | ||
404 | CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ | ||
405 | CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ | ||
406 | CMD_PHY_CTL = 0x194, /* PHY Control and Status */ | ||
407 | CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ | ||
408 | CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ | ||
409 | CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ | ||
410 | CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ | ||
411 | CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ | ||
412 | CMD_HOST_CTL = 0x1AC, /* Host Control Status */ | ||
413 | CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ | ||
414 | CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ | ||
415 | CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ | ||
416 | CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ | ||
417 | CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ | ||
418 | CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ | ||
419 | }; | ||
420 | |||
421 | /* SAS/SATA configuration port registers, aka phy registers */ | ||
422 | enum sas_sata_config_port_regs { | ||
423 | PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ | ||
424 | PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ | ||
425 | PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ | ||
426 | PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ | ||
427 | PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ | ||
428 | PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ | ||
429 | PHYR_SATA_CTL = 0x18, /* SATA control */ | ||
430 | PHYR_PHY_STAT = 0x1C, /* PHY status */ | ||
431 | PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ | ||
432 | PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ | ||
433 | PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ | ||
434 | PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ | ||
435 | PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ | ||
436 | PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ | ||
437 | PHYR_WIDE_PORT = 0x38, /* wide port participating */ | ||
438 | PHYR_CURRENT0 = 0x80, /* current connection info 0 */ | ||
439 | PHYR_CURRENT1 = 0x84, /* current connection info 1 */ | ||
440 | PHYR_CURRENT2 = 0x88, /* current connection info 2 */ | ||
441 | }; | ||
442 | |||
443 | /* SAS/SATA Vendor Specific Port Registers */ | ||
444 | enum sas_sata_vsp_regs { | ||
445 | VSR_PHY_STAT = 0x00, /* Phy Status */ | ||
446 | VSR_PHY_MODE1 = 0x01, /* phy tx */ | ||
447 | VSR_PHY_MODE2 = 0x02, /* tx scc */ | ||
448 | VSR_PHY_MODE3 = 0x03, /* pll */ | ||
449 | VSR_PHY_MODE4 = 0x04, /* VCO */ | ||
450 | VSR_PHY_MODE5 = 0x05, /* Rx */ | ||
451 | VSR_PHY_MODE6 = 0x06, /* CDR */ | ||
452 | VSR_PHY_MODE7 = 0x07, /* Impedance */ | ||
453 | VSR_PHY_MODE8 = 0x08, /* Voltage */ | ||
454 | VSR_PHY_MODE9 = 0x09, /* Test */ | ||
455 | VSR_PHY_MODE10 = 0x0A, /* Power */ | ||
456 | VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ | ||
457 | VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ | ||
458 | VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ | ||
459 | }; | ||
460 | |||
461 | enum pci_cfg_registers { | ||
462 | PCR_PHY_CTL = 0x40, | ||
463 | PCR_PHY_CTL2 = 0x90, | ||
464 | PCR_DEV_CTRL = 0xE8, | ||
465 | }; | ||
466 | |||
467 | enum pci_cfg_register_bits { | ||
468 | PCTL_PWR_ON = (0xFU << 24), | ||
469 | PCTL_OFF = (0xFU << 12), | ||
470 | PRD_REQ_SIZE = (0x4000), | ||
471 | PRD_REQ_MASK = (0x00007000), | ||
472 | }; | ||
473 | |||
474 | enum nvram_layout_offsets { | ||
475 | NVR_SIG = 0x00, /* 0xAA, 0x55 */ | ||
476 | NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ | ||
477 | }; | ||
478 | |||
479 | enum chip_flavors { | ||
480 | chip_6320, | ||
481 | chip_6440, | ||
482 | chip_6480, | ||
483 | }; | ||
484 | |||
485 | enum port_type { | ||
486 | PORT_TYPE_SAS = (1L << 1), | ||
487 | PORT_TYPE_SATA = (1L << 0), | ||
488 | }; | ||
489 | |||
490 | /* Command Table Format */ | ||
491 | enum ct_format { | ||
492 | /* SSP */ | ||
493 | SSP_F_H = 0x00, | ||
494 | SSP_F_IU = 0x18, | ||
495 | SSP_F_MAX = 0x4D, | ||
496 | /* STP */ | ||
497 | STP_CMD_FIS = 0x00, | ||
498 | STP_ATAPI_CMD = 0x40, | ||
499 | STP_F_MAX = 0x10, | ||
500 | /* SMP */ | ||
501 | SMP_F_T = 0x00, | ||
502 | SMP_F_DEP = 0x01, | ||
503 | SMP_F_MAX = 0x101, | ||
504 | }; | ||
505 | |||
506 | enum status_buffer { | ||
507 | SB_EIR_OFF = 0x00, /* Error Information Record */ | ||
508 | SB_RFB_OFF = 0x08, /* Response Frame Buffer */ | ||
509 | SB_RFB_MAX = 0x400, /* RFB size*/ | ||
510 | }; | ||
511 | |||
512 | enum error_info_rec { | ||
513 | CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ | ||
514 | }; | ||
515 | |||
516 | struct mvs_chip_info { | ||
517 | u32 n_phy; | ||
518 | u32 srs_sz; | ||
519 | u32 slot_width; | ||
520 | }; | ||
521 | |||
522 | struct mvs_err_info { | ||
523 | __le32 flags; | ||
524 | __le32 flags2; | ||
525 | }; | ||
526 | |||
527 | struct mvs_prd { | ||
528 | __le64 addr; /* 64-bit buffer address */ | ||
529 | __le32 reserved; | ||
530 | __le32 len; /* 16-bit length */ | ||
531 | }; | ||
532 | |||
533 | struct mvs_cmd_hdr { | ||
534 | __le32 flags; /* PRD tbl len; SAS, SATA ctl */ | ||
535 | __le32 lens; /* cmd, max resp frame len */ | ||
536 | __le32 tags; /* targ port xfer tag; tag */ | ||
537 | __le32 data_len; /* data xfer len */ | ||
538 | __le64 cmd_tbl; /* command table address */ | ||
539 | __le64 open_frame; /* open addr frame address */ | ||
540 | __le64 status_buf; /* status buffer address */ | ||
541 | __le64 prd_tbl; /* PRD tbl address */ | ||
542 | __le32 reserved[4]; | ||
543 | }; | ||
544 | |||
545 | struct mvs_slot_info { | ||
546 | struct sas_task *task; | ||
547 | u32 n_elem; | ||
548 | u32 tx; | ||
549 | |||
550 | /* DMA buffer for storing cmd tbl, open addr frame, status buffer, | ||
551 | * and PRD table | ||
552 | */ | ||
553 | void *buf; | ||
554 | dma_addr_t buf_dma; | ||
555 | #if _MV_DUMP | ||
556 | u32 cmd_size; | ||
557 | #endif | ||
558 | |||
559 | void *response; | ||
560 | }; | ||
561 | |||
562 | struct mvs_port { | ||
563 | struct asd_sas_port sas_port; | ||
564 | u8 port_attached; | ||
565 | u8 taskfileset; | ||
566 | u8 wide_port_phymap; | ||
567 | }; | ||
568 | |||
569 | struct mvs_phy { | ||
570 | struct mvs_port *port; | ||
571 | struct asd_sas_phy sas_phy; | ||
572 | struct sas_identify identify; | ||
573 | struct scsi_device *sdev; | ||
574 | u64 dev_sas_addr; | ||
575 | u64 att_dev_sas_addr; | ||
576 | u32 att_dev_info; | ||
577 | u32 dev_info; | ||
578 | u32 phy_type; | ||
579 | u32 phy_status; | ||
580 | u32 irq_status; | ||
581 | u32 frame_rcvd_size; | ||
582 | u8 frame_rcvd[32]; | ||
583 | u8 phy_attached; | ||
584 | }; | ||
585 | |||
586 | struct mvs_info { | ||
587 | unsigned long flags; | ||
588 | |||
589 | spinlock_t lock; /* host-wide lock */ | ||
590 | struct pci_dev *pdev; /* our device */ | ||
591 | void __iomem *regs; /* enhanced mode registers */ | ||
592 | void __iomem *peri_regs; /* peripheral registers */ | ||
593 | |||
594 | u8 sas_addr[SAS_ADDR_SIZE]; | ||
595 | struct sas_ha_struct sas; /* SCSI/SAS glue */ | ||
596 | struct Scsi_Host *shost; | ||
597 | |||
598 | __le32 *tx; /* TX (delivery) DMA ring */ | ||
599 | dma_addr_t tx_dma; | ||
600 | u32 tx_prod; /* cached next-producer idx */ | ||
601 | |||
602 | __le32 *rx; /* RX (completion) DMA ring */ | ||
603 | dma_addr_t rx_dma; | ||
604 | u32 rx_cons; /* RX consumer idx */ | ||
605 | |||
606 | __le32 *rx_fis; /* RX'd FIS area */ | ||
607 | dma_addr_t rx_fis_dma; | ||
608 | |||
609 | struct mvs_cmd_hdr *slot; /* DMA command header slots */ | ||
610 | dma_addr_t slot_dma; | ||
611 | |||
612 | const struct mvs_chip_info *chip; | ||
613 | |||
614 | unsigned long tags[MVS_SLOTS]; | ||
615 | struct mvs_slot_info slot_info[MVS_SLOTS]; | ||
616 | /* further per-slot information */ | ||
617 | struct mvs_phy phy[MVS_MAX_PHYS]; | ||
618 | struct mvs_port port[MVS_MAX_PHYS]; | ||
619 | |||
620 | u32 can_queue; /* per adapter */ | ||
621 | u32 tag_out; /* index of next tag to allocate (get) */ | ||
622 | u32 tag_in; /* index of last returned tag (give) */ | ||
623 | }; | ||
624 | |||
625 | struct mvs_queue_task { | ||
626 | struct list_head list; | ||
627 | |||
628 | void *uldd_task; | ||
629 | }; | ||
630 | |||
631 | static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
632 | void *funcdata); | ||
633 | static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); | ||
634 | static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); | ||
635 | static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port); | ||
636 | static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, | ||
637 | u32 port, u32 val); | ||
638 | static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port); | ||
639 | static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val); | ||
640 | static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr); | ||
641 | static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port); | ||
642 | static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val); | ||
643 | static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr); | ||
644 | static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); | ||
645 | static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); | ||
646 | static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); | ||
647 | static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); | ||
648 | |||
649 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); | ||
650 | static void mvs_detect_porttype(struct mvs_info *mvi, int i); | ||
651 | static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); | ||
652 | static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port); | ||
653 | static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port); | ||
654 | static u32 mvs_is_sig_fis_received(u32 irq_status); | ||
655 | |||
656 | static int mvs_scan_finished(struct Scsi_Host *, unsigned long); | ||
657 | static void mvs_scan_start(struct Scsi_Host *); | ||
658 | static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev); | ||
659 | |||
660 | static struct scsi_transport_template *mvs_stt; | ||
661 | |||
662 | static const struct mvs_chip_info mvs_chips[] = { | ||
663 | [chip_6320] = { 2, 16, 9 }, | ||
664 | [chip_6440] = { 4, 16, 9 }, | ||
665 | [chip_6480] = { 8, 32, 10 }, | ||
666 | }; | ||
667 | |||
668 | static struct scsi_host_template mvs_sht = { | ||
669 | .module = THIS_MODULE, | ||
670 | .name = DRV_NAME, | ||
671 | .queuecommand = sas_queuecommand, | ||
672 | .target_alloc = sas_target_alloc, | ||
673 | .slave_configure = sas_slave_configure, | ||
674 | .slave_destroy = sas_slave_destroy, | ||
675 | .scan_finished = mvs_scan_finished, | ||
676 | .scan_start = mvs_scan_start, | ||
677 | .change_queue_depth = sas_change_queue_depth, | ||
678 | .change_queue_type = sas_change_queue_type, | ||
679 | .bios_param = sas_bios_param, | ||
680 | .can_queue = 1, | ||
681 | .cmd_per_lun = 1, | ||
682 | .this_id = -1, | ||
683 | .sg_tablesize = SG_ALL, | ||
684 | .max_sectors = SCSI_DEFAULT_MAX_SECTORS, | ||
685 | .use_clustering = ENABLE_CLUSTERING, | ||
686 | .eh_device_reset_handler = sas_eh_device_reset_handler, | ||
687 | .eh_bus_reset_handler = sas_eh_bus_reset_handler, | ||
688 | .slave_alloc = mvs_sas_slave_alloc, | ||
689 | .target_destroy = sas_target_destroy, | ||
690 | .ioctl = sas_ioctl, | ||
691 | }; | ||
692 | |||
693 | static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) | ||
694 | { | ||
695 | u32 i; | ||
696 | u32 run; | ||
697 | u32 offset; | ||
698 | |||
699 | offset = 0; | ||
700 | while (size) { | ||
701 | printk("%08X : ", baseaddr + offset); | ||
702 | if (size >= 16) | ||
703 | run = 16; | ||
704 | else | ||
705 | run = size; | ||
706 | size -= run; | ||
707 | for (i = 0; i < 16; i++) { | ||
708 | if (i < run) | ||
709 | printk("%02X ", (u32)data[i]); | ||
710 | else | ||
711 | printk(" "); | ||
712 | } | ||
713 | printk(": "); | ||
714 | for (i = 0; i < run; i++) | ||
715 | printk("%c", isalnum(data[i]) ? data[i] : '.'); | ||
716 | printk("\n"); | ||
717 | data = &data[16]; | ||
718 | offset += run; | ||
719 | } | ||
720 | printk("\n"); | ||
721 | } | ||
722 | |||
723 | static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, | ||
724 | enum sas_protocol proto) | ||
725 | { | ||
726 | #if _MV_DUMP | ||
727 | u32 offset; | ||
728 | struct pci_dev *pdev = mvi->pdev; | ||
729 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | ||
730 | |||
731 | offset = slot->cmd_size + MVS_OAF_SZ + | ||
732 | sizeof(struct mvs_prd) * slot->n_elem; | ||
733 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", | ||
734 | tag); | ||
735 | mvs_hexdump(32, (u8 *) slot->response, | ||
736 | (u32) slot->buf_dma + offset); | ||
737 | #endif | ||
738 | } | ||
739 | |||
740 | static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, | ||
741 | enum sas_protocol proto) | ||
742 | { | ||
743 | #if _MV_DUMP | ||
744 | u32 sz, w_ptr, r_ptr; | ||
745 | u64 addr; | ||
746 | void __iomem *regs = mvi->regs; | ||
747 | struct pci_dev *pdev = mvi->pdev; | ||
748 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | ||
749 | |||
750 | /*Delivery Queue */ | ||
751 | sz = mr32(TX_CFG) & TX_RING_SZ_MASK; | ||
752 | w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK; | ||
753 | r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK; | ||
754 | addr = ((u64)mr32(TX_HI)) << 16 << 16 | mr32(TX_LO); | ||
755 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
756 | "Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n", | ||
757 | sz, w_ptr, r_ptr); | ||
758 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
759 | "Delivery Queue Base Address=0x%llX (PA)" | ||
760 | "(tx_dma=0x%llX), Entry=%04d\n", | ||
761 | addr, mvi->tx_dma, w_ptr); | ||
762 | mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), | ||
763 | (u32) mvi->tx_dma + sizeof(u32) * w_ptr); | ||
764 | /*Command List */ | ||
765 | addr = ((u64)mr32(CMD_LIST_HI)) << 16 << 16 | mr32(CMD_LIST_LO); | ||
766 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
767 | "Command List Base Address=0x%llX (PA)" | ||
768 | "(slot_dma=0x%llX), Header=%03d\n", | ||
769 | addr, mvi->slot_dma, tag); | ||
770 | dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); | ||
771 | /*mvs_cmd_hdr */ | ||
772 | mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), | ||
773 | (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); | ||
774 | /*1.command table area */ | ||
775 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); | ||
776 | mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); | ||
777 | /*2.open address frame area */ | ||
778 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); | ||
779 | mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, | ||
780 | (u32) slot->buf_dma + slot->cmd_size); | ||
781 | /*3.status buffer */ | ||
782 | mvs_hba_sb_dump(mvi, tag, proto); | ||
783 | /*4.PRD table */ | ||
784 | dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); | ||
785 | mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, | ||
786 | (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, | ||
787 | (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); | ||
788 | #endif | ||
789 | } | ||
790 | |||
791 | static void mvs_hba_cq_dump(struct mvs_info *mvi) | ||
792 | { | ||
793 | #if _MV_DUMP | ||
794 | u64 addr; | ||
795 | void __iomem *regs = mvi->regs; | ||
796 | struct pci_dev *pdev = mvi->pdev; | ||
797 | u32 entry = mvi->rx_cons + 1; | ||
798 | u32 rx_desc = le32_to_cpu(mvi->rx[entry]); | ||
799 | |||
800 | /*Completion Queue */ | ||
801 | addr = ((u64)mr32(RX_HI)) << 16 << 16 | mr32(RX_LO); | ||
802 | dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n", | ||
803 | (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); | ||
804 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
805 | "Completion List Base Address=0x%llX (PA), " | ||
806 | "CQ_Entry=%04d, CQ_WP=0x%08X\n", | ||
807 | addr, entry - 1, mvi->rx[0]); | ||
808 | mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), | ||
809 | mvi->rx_dma + sizeof(u32) * entry); | ||
810 | #endif | ||
811 | } | ||
812 | |||
813 | static void mvs_hba_interrupt_enable(struct mvs_info *mvi) | ||
814 | { | ||
815 | void __iomem *regs = mvi->regs; | ||
816 | u32 tmp; | ||
817 | |||
818 | tmp = mr32(GBL_CTL); | ||
819 | |||
820 | mw32(GBL_CTL, tmp | INT_EN); | ||
821 | } | ||
822 | |||
823 | static void mvs_hba_interrupt_disable(struct mvs_info *mvi) | ||
824 | { | ||
825 | void __iomem *regs = mvi->regs; | ||
826 | u32 tmp; | ||
827 | |||
828 | tmp = mr32(GBL_CTL); | ||
829 | |||
830 | mw32(GBL_CTL, tmp & ~INT_EN); | ||
831 | } | ||
832 | |||
833 | static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); | ||
834 | |||
835 | /* move to PCI layer or libata core? */ | ||
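| /* Prefer 64-bit DMA: if the 64-bit streaming mask is accepted but | ||
|  * the 64-bit coherent mask is not, fall back to a 32-bit coherent | ||
|  * mask; otherwise use 32-bit masks for both. | ||
|  */ | ||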
836 | static int pci_go_64(struct pci_dev *pdev) | ||
837 | { | ||
838 | int rc; | ||
839 | |||
840 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { | ||
841 | rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | ||
842 | if (rc) { | ||
843 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
844 | if (rc) { | ||
845 | dev_printk(KERN_ERR, &pdev->dev, | ||
846 | "64-bit DMA enable failed\n"); | ||
847 | return rc; | ||
848 | } | ||
849 | } | ||
850 | } else { | ||
851 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
852 | if (rc) { | ||
853 | dev_printk(KERN_ERR, &pdev->dev, | ||
854 | "32-bit DMA enable failed\n"); | ||
855 | return rc; | ||
856 | } | ||
857 | rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
858 | if (rc) { | ||
859 | dev_printk(KERN_ERR, &pdev->dev, | ||
860 | "32-bit consistent DMA enable failed\n"); | ||
861 | return rc; | ||
862 | } | ||
863 | } | ||
864 | |||
865 | return rc; | ||
866 | } | ||
867 | |||
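| /* | ||
|  * Slot tag allocator: tags[] is a circular FIFO of free tags. | ||
|  * mvs_tag_alloc() takes the next tag at tag_out, mvs_tag_clear() | ||
|  * returns a completed tag at tag_in, and mvs_tag_free() simply backs | ||
|  * tag_out up by one to undo the most recent allocation (see the | ||
|  * err_out_tag path in mvs_task_exec()). | ||
|  */ | ||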
868 | static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) | ||
869 | { | ||
870 | mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1); | ||
871 | mvi->tags[mvi->tag_in] = tag; | ||
872 | } | ||
873 | |||
874 | static void mvs_tag_free(struct mvs_info *mvi, u32 tag) | ||
875 | { | ||
876 | mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1); | ||
877 | } | ||
878 | |||
879 | static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) | ||
880 | { | ||
881 | if (mvi->tag_out != mvi->tag_in) { | ||
882 | *tag_out = mvi->tags[mvi->tag_out]; | ||
883 | mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1); | ||
884 | return 0; | ||
885 | } | ||
886 | return -EBUSY; | ||
887 | } | ||
888 | |||
889 | static void mvs_tag_init(struct mvs_info *mvi) | ||
890 | { | ||
891 | int i; | ||
892 | for (i = 0; i < MVS_SLOTS; ++i) | ||
893 | mvi->tags[i] = i; | ||
894 | mvi->tag_out = 0; | ||
895 | mvi->tag_in = MVS_SLOTS - 1; | ||
896 | } | ||
897 | |||
898 | #ifndef MVS_DISABLE_NVRAM | ||
899 | static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) | ||
900 | { | ||
901 | int timeout = 1000; | ||
902 | |||
903 | if (addr & ~SPI_ADDR_MASK) | ||
904 | return -EINVAL; | ||
905 | |||
906 | writel(addr, regs + SPI_CMD); | ||
907 | writel(TWSI_RD, regs + SPI_CTL); | ||
908 | |||
909 | while (timeout-- > 0) { | ||
910 | if (readl(regs + SPI_CTL) & TWSI_RDY) { | ||
911 | *data = readl(regs + SPI_DATA); | ||
912 | return 0; | ||
913 | } | ||
914 | |||
915 | udelay(10); | ||
916 | } | ||
917 | |||
918 | return -EBUSY; | ||
919 | } | ||
920 | |||
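| /* | ||
|  * Read an arbitrary byte range from the EEPROM using only aligned | ||
|  * 32-bit accesses: a partial head word, whole middle words, then a | ||
|  * partial tail word. | ||
|  */ | ||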
921 | static int mvs_eep_read_buf(void __iomem *regs, u32 addr, | ||
922 | void *buf, u32 buflen) | ||
923 | { | ||
924 | u32 addr_end, tmp_addr, i, j; | ||
925 | u32 tmp = 0; | ||
926 | int rc; | ||
927 | u8 *tmp8, *buf8 = buf; | ||
928 | |||
929 | addr_end = addr + buflen; | ||
930 | tmp_addr = ALIGN(addr, 4); | ||
931 | if (addr > 0xff) | ||
932 | return -EINVAL; | ||
933 | |||
934 | j = addr & 0x3; | ||
935 | if (j) { | ||
936 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
937 | if (rc) | ||
938 | return rc; | ||
939 | |||
940 | tmp8 = (u8 *)&tmp; | ||
941 | for (i = j; i < 4; i++) | ||
942 | *buf8++ = tmp8[i]; | ||
943 | |||
944 | tmp_addr += 4; | ||
945 | } | ||
946 | |||
947 | for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { | ||
948 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
949 | if (rc) | ||
950 | return rc; | ||
951 | |||
952 | memcpy(buf8, &tmp, 4); | ||
953 | buf8 += 4; | ||
954 | } | ||
955 | |||
956 | if (tmp_addr < addr_end) { | ||
957 | rc = mvs_eep_read(regs, tmp_addr, &tmp); | ||
958 | if (rc) | ||
959 | return rc; | ||
960 | |||
961 | tmp8 = (u8 *)&tmp; | ||
962 | j = addr_end - tmp_addr; | ||
963 | for (i = 0; i < j; i++) | ||
964 | *buf8++ = tmp8[i]; | ||
965 | |||
966 | tmp_addr += 4; | ||
967 | } | ||
968 | |||
969 | return 0; | ||
970 | } | ||
971 | #endif | ||
972 | |||
973 | static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, | ||
974 | void *buf, u32 buflen) | ||
975 | { | ||
976 | #ifndef MVS_DISABLE_NVRAM | ||
977 | void __iomem *regs = mvi->regs; | ||
978 | int rc, i; | ||
979 | u32 sum; | ||
980 | u8 hdr[2], *tmp; | ||
981 | const char *msg; | ||
982 | |||
983 | rc = mvs_eep_read_buf(regs, addr, &hdr, 2); | ||
984 | if (rc) { | ||
985 | msg = "nvram hdr read failed"; | ||
986 | goto err_out; | ||
987 | } | ||
988 | rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); | ||
989 | if (rc) { | ||
990 | msg = "nvram read failed"; | ||
991 | goto err_out; | ||
992 | } | ||
993 | |||
994 | if (hdr[0] != 0x5A) { | ||
995 | /* entry id */ | ||
996 | msg = "invalid nvram entry id"; | ||
997 | rc = -ENOENT; | ||
998 | goto err_out; | ||
999 | } | ||
1000 | |||
1001 | tmp = buf; | ||
1002 | sum = ((u32)hdr[0]) + ((u32)hdr[1]); | ||
1003 | for (i = 0; i < buflen; i++) | ||
1004 | sum += ((u32)tmp[i]); | ||
1005 | |||
1006 | if (sum) { | ||
1007 | msg = "nvram checksum failure"; | ||
1008 | rc = -EILSEQ; | ||
1009 | goto err_out; | ||
1010 | } | ||
1011 | |||
1012 | return 0; | ||
1013 | |||
1014 | err_out: | ||
1015 | dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg); | ||
1016 | return rc; | ||
1017 | #else | ||
1018 | /* FIXME: for SAS target mode */ | ||
1019 | memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8); | ||
1020 | return 0; | ||
1021 | #endif | ||
1022 | } | ||
1023 | |||
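| /* Tell libsas the phy's identification bytes have arrived: fill in | ||
|  * the identify frame for SAS phys (SATA is still a TODO here) and | ||
|  * raise PORTE_BYTES_DMAED. | ||
|  */ | ||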
1024 | static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) | ||
1025 | { | ||
1026 | struct mvs_phy *phy = &mvi->phy[i]; | ||
1027 | |||
1028 | if (!phy->phy_attached) | ||
1029 | return; | ||
1030 | |||
1031 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
1032 | struct sas_identify_frame *id; | ||
1033 | |||
1034 | id = (struct sas_identify_frame *)phy->frame_rcvd; | ||
1035 | id->dev_type = phy->identify.device_type; | ||
1036 | id->initiator_bits = SAS_PROTOCOL_ALL; | ||
1037 | id->target_bits = phy->identify.target_port_protocols; | ||
1038 | } else if (phy->phy_type & PORT_TYPE_SATA) { | ||
1039 | /* TODO */ | ||
1040 | } | ||
1041 | mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; | ||
1042 | mvi->sas.notify_port_event(mvi->sas.sas_phy[i], | ||
1043 | PORTE_BYTES_DMAED); | ||
1044 | } | ||
1045 | |||
1046 | static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) | ||
1047 | { | ||
1048 | /* give the phy enabling interrupt event time to come in (1s | ||
1049 | * is empirically about all it takes) */ | ||
1050 | if (time < HZ) | ||
1051 | return 0; | ||
1052 | /* Wait for discovery to finish */ | ||
1053 | scsi_flush_work(shost); | ||
1054 | return 1; | ||
1055 | } | ||
1056 | |||
1057 | static void mvs_scan_start(struct Scsi_Host *shost) | ||
1058 | { | ||
1059 | int i; | ||
1060 | struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; | ||
1061 | |||
1062 | for (i = 0; i < mvi->chip->n_phy; ++i) { | ||
1063 | mvs_bytes_dmaed(mvi, i); | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1067 | static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev) | ||
1068 | { | ||
1069 | int rc; | ||
1070 | |||
1071 | rc = sas_slave_alloc(scsi_dev); | ||
1072 | |||
1073 | return rc; | ||
1074 | } | ||
1075 | |||
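| /* | ||
|  * Per-port phy event handler.  Loss of signal or decode errors either | ||
|  * report PHYE_LOSS_OF_SIGNAL or force a link reset; COMWAKE unmasks | ||
|  * the signature-FIS interrupt; SIG_FIS/ID_DONE re-detect the port | ||
|  * type, update the phy info and notify libsas (OOB done plus bytes | ||
|  * DMAed); broadcast changes are forwarded as PORTE_BROADCAST_RCVD. | ||
|  */ | ||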
1076 | static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events) | ||
1077 | { | ||
1078 | struct pci_dev *pdev = mvi->pdev; | ||
1079 | struct sas_ha_struct *sas_ha = &mvi->sas; | ||
1080 | struct mvs_phy *phy = &mvi->phy[port_no]; | ||
1081 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
1082 | |||
1083 | phy->irq_status = mvs_read_port_irq_stat(mvi, port_no); | ||
1084 | /* | ||
1085 | * "events" carries the port event; we still need to check the | ||
1086 | * per-port interrupt status. | ||
1087 | */ | ||
1088 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1089 | "Port %d Event = %X\n", | ||
1090 | port_no, phy->irq_status); | ||
1091 | |||
1092 | if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { | ||
1093 | if (!mvs_is_phy_ready(mvi, port_no)) { | ||
1094 | sas_phy_disconnected(sas_phy); | ||
1095 | sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); | ||
1096 | } else | ||
1097 | mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); | ||
1098 | } | ||
1099 | if (!(phy->irq_status & PHYEV_DEC_ERR)) { | ||
1100 | if (phy->irq_status & PHYEV_COMWAKE) { | ||
1101 | u32 tmp = mvs_read_port_irq_mask(mvi, port_no); | ||
1102 | mvs_write_port_irq_mask(mvi, port_no, | ||
1103 | tmp | PHYEV_SIG_FIS); | ||
1104 | } | ||
1105 | if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { | ||
1106 | phy->phy_status = mvs_is_phy_ready(mvi, port_no); | ||
1107 | if (phy->phy_status) { | ||
1108 | mvs_detect_porttype(mvi, port_no); | ||
1109 | |||
1110 | if (phy->phy_type & PORT_TYPE_SATA) { | ||
1111 | u32 tmp = mvs_read_port_irq_mask(mvi, | ||
1112 | port_no); | ||
1113 | tmp &= ~PHYEV_SIG_FIS; | ||
1114 | mvs_write_port_irq_mask(mvi, | ||
1115 | port_no, tmp); | ||
1116 | } | ||
1117 | |||
1118 | mvs_update_phyinfo(mvi, port_no, 0); | ||
1119 | sas_ha->notify_phy_event(sas_phy, | ||
1120 | PHYE_OOB_DONE); | ||
1121 | mvs_bytes_dmaed(mvi, port_no); | ||
1122 | } else { | ||
1123 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
1124 | "plugin interrupt but phy is gone\n"); | ||
1125 | mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, | ||
1126 | NULL); | ||
1127 | } | ||
1128 | } else if (phy->irq_status & PHYEV_BROAD_CH) | ||
1129 | sas_ha->notify_port_event(sas_phy, | ||
1130 | PORTE_BROADCAST_RCVD); | ||
1131 | } | ||
1132 | mvs_write_port_irq_stat(mvi, port_no, phy->irq_status); | ||
1133 | } | ||
1134 | |||
1135 | static void mvs_int_sata(struct mvs_info *mvi) | ||
1136 | { | ||
1137 | /* FIXME */ | ||
1138 | } | ||
1139 | |||
1140 | static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task, | ||
1141 | struct mvs_slot_info *slot, u32 slot_idx) | ||
1142 | { | ||
1143 | if (!sas_protocol_ata(task->task_proto)) | ||
1144 | if (slot->n_elem) | ||
1145 | pci_unmap_sg(mvi->pdev, task->scatter, | ||
1146 | slot->n_elem, task->data_dir); | ||
1147 | |||
1148 | switch (task->task_proto) { | ||
1149 | case SAS_PROTOCOL_SMP: | ||
1150 | pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, | ||
1151 | PCI_DMA_FROMDEVICE); | ||
1152 | pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, | ||
1153 | PCI_DMA_TODEVICE); | ||
1154 | break; | ||
1155 | |||
1156 | case SAS_PROTOCOL_SATA: | ||
1157 | case SAS_PROTOCOL_STP: | ||
1158 | case SAS_PROTOCOL_SSP: | ||
1159 | default: | ||
1160 | /* do nothing */ | ||
1161 | break; | ||
1162 | } | ||
1163 | |||
1164 | slot->task = NULL; | ||
1165 | mvs_tag_clear(mvi, slot_idx); | ||
1166 | } | ||
1167 | |||
1168 | static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, | ||
1169 | u32 slot_idx) | ||
1170 | { | ||
1171 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; | ||
1172 | u64 err_dw0 = *(u32 *) slot->response; | ||
1173 | void __iomem *regs = mvi->regs; | ||
1174 | u32 tmp; | ||
1175 | |||
1176 | if (err_dw0 & CMD_ISS_STPD) | ||
1177 | if (sas_protocol_ata(task->task_proto)) { | ||
1178 | tmp = mr32(INT_STAT_SRS); | ||
1179 | mw32(INT_STAT_SRS, tmp & 0xFFFF); | ||
1180 | } | ||
1181 | |||
1182 | mvs_hba_sb_dump(mvi, slot_idx, task->task_proto); | ||
1183 | } | ||
1184 | |||
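| /* | ||
|  * Completion of one command slot: translate the RX descriptor and, | ||
|  * per protocol, the slot's status buffer into a libsas | ||
|  * task_status_struct, then release the slot and call task_done(). | ||
|  */ | ||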
1185 | static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc) | ||
1186 | { | ||
1187 | u32 slot_idx = rx_desc & RXQ_SLOT_MASK; | ||
1188 | struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; | ||
1189 | struct sas_task *task = slot->task; | ||
1190 | struct task_status_struct *tstat = &task->task_status; | ||
1191 | struct mvs_port *port = &mvi->port[task->dev->port->id]; | ||
1192 | bool aborted; | ||
1193 | void *to; | ||
1194 | |||
1195 | spin_lock(&task->task_state_lock); | ||
1196 | aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; | ||
1197 | if (!aborted) { | ||
1198 | task->task_state_flags &= | ||
1199 | ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); | ||
1200 | task->task_state_flags |= SAS_TASK_STATE_DONE; | ||
1201 | } | ||
1202 | spin_unlock(&task->task_state_lock); | ||
1203 | |||
1204 | if (aborted) | ||
1205 | return -1; | ||
1206 | |||
1207 | memset(tstat, 0, sizeof(*tstat)); | ||
1208 | tstat->resp = SAS_TASK_COMPLETE; | ||
1209 | |||
1210 | |||
1211 | if (unlikely(!port->port_attached)) { | ||
1212 | tstat->stat = SAS_PHY_DOWN; | ||
1213 | goto out; | ||
1214 | } | ||
1215 | |||
1216 | /* error info record present */ | ||
1217 | if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) { | ||
1218 | tstat->stat = SAM_CHECK_COND; | ||
1219 | mvs_slot_err(mvi, task, slot_idx); | ||
1220 | goto out; | ||
1221 | } | ||
1222 | |||
1223 | switch (task->task_proto) { | ||
1224 | case SAS_PROTOCOL_SSP: | ||
1225 | /* hw says status == 0, datapres == 0 */ | ||
1226 | if (rx_desc & RXQ_GOOD) { | ||
1227 | tstat->stat = SAM_GOOD; | ||
1228 | tstat->resp = SAS_TASK_COMPLETE; | ||
1229 | } | ||
1230 | /* response frame present */ | ||
1231 | else if (rx_desc & RXQ_RSP) { | ||
1232 | struct ssp_response_iu *iu = | ||
1233 | slot->response + sizeof(struct mvs_err_info); | ||
1234 | sas_ssp_task_response(&mvi->pdev->dev, task, iu); | ||
1235 | } | ||
1236 | |||
1237 | /* should never happen? */ | ||
1238 | else | ||
1239 | tstat->stat = SAM_CHECK_COND; | ||
1240 | break; | ||
1241 | |||
1242 | case SAS_PROTOCOL_SMP: { | ||
1243 | struct scatterlist *sg_resp = &task->smp_task.smp_resp; | ||
1244 | tstat->stat = SAM_GOOD; | ||
1245 | to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); | ||
1246 | memcpy(to + sg_resp->offset, | ||
1247 | slot->response + sizeof(struct mvs_err_info), | ||
1248 | sg_dma_len(sg_resp)); | ||
1249 | kunmap_atomic(to, KM_IRQ0); | ||
1250 | break; | ||
1251 | } | ||
1252 | |||
1253 | case SAS_PROTOCOL_SATA: | ||
1254 | case SAS_PROTOCOL_STP: | ||
1255 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { | ||
1256 | struct ata_task_resp *resp = | ||
1257 | (struct ata_task_resp *)tstat->buf; | ||
1258 | |||
1259 | if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) == | ||
1260 | RXQ_DONE) | ||
1261 | tstat->stat = SAM_GOOD; | ||
1262 | else | ||
1263 | tstat->stat = SAM_CHECK_COND; | ||
1264 | |||
1265 | resp->frame_len = sizeof(struct dev_to_host_fis); | ||
1266 | memcpy(&resp->ending_fis[0], | ||
1267 | SATA_RECEIVED_D2H_FIS(port->taskfileset), | ||
1268 | sizeof(struct dev_to_host_fis)); | ||
1269 | if (resp->ending_fis[2] & ATA_ERR) | ||
1270 | mvs_hexdump(16, resp->ending_fis, 0); | ||
1271 | break; | ||
1272 | } | ||
1273 | |||
1274 | default: | ||
1275 | tstat->stat = SAM_CHECK_COND; | ||
1276 | break; | ||
1277 | } | ||
1278 | |||
1279 | out: | ||
1280 | mvs_slot_free(mvi, task, slot, slot_idx); | ||
1281 | task->task_done(task); | ||
1282 | return tstat->stat; | ||
1283 | } | ||
1284 | |||
1285 | static void mvs_int_full(struct mvs_info *mvi) | ||
1286 | { | ||
1287 | void __iomem *regs = mvi->regs; | ||
1288 | u32 tmp, stat; | ||
1289 | int i; | ||
1290 | |||
1291 | stat = mr32(INT_STAT); | ||
1292 | |||
1293 | mvs_int_rx(mvi, false); | ||
1294 | |||
1295 | for (i = 0; i < MVS_MAX_PORTS; i++) { | ||
1296 | tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); | ||
1297 | if (tmp) | ||
1298 | mvs_int_port(mvi, i, tmp); | ||
1299 | } | ||
1300 | |||
1301 | if (stat & CINT_SRS) | ||
1302 | mvs_int_sata(mvi); | ||
1303 | |||
1304 | mw32(INT_STAT, stat); | ||
1305 | } | ||
1306 | |||
1307 | static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | ||
1308 | { | ||
1309 | void __iomem *regs = mvi->regs; | ||
1310 | u32 rx_prod_idx, rx_desc; | ||
1311 | bool attn = false; | ||
1312 | struct pci_dev *pdev = mvi->pdev; | ||
1313 | |||
1314 | /* the first dword in the RX ring is special: it contains | ||
1315 | * a mirror of the hardware's RX producer index, so that | ||
1316 | * we don't have to stall the CPU reading that register. | ||
1317 | * The actual RX ring is offset by one dword, due to this. | ||
1318 | */ | ||
1319 | rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; | ||
1320 | if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */ | ||
1321 | mvi->rx_cons = 0xfff; | ||
1322 | return 0; | ||
1323 | } | ||
1324 | |||
1325 | /* The completion queue may lag behind; read the index from the | ||
1326 | * register and try again.  Note: if interrupt coalescing is | ||
1327 | * enabled, the register must be read on every pass. | ||
1328 | */ | ||
1329 | if (mvi->rx_cons == rx_prod_idx) | ||
1330 | return 0; | ||
1331 | |||
1332 | if (mvi->rx_cons == 0xfff) | ||
1333 | mvi->rx_cons = MVS_RX_RING_SZ - 1; | ||
1334 | |||
1335 | while (mvi->rx_cons != rx_prod_idx) { | ||
1336 | |||
1337 | /* increment our internal RX consumer pointer */ | ||
1338 | mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1); | ||
1339 | |||
1340 | rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]); | ||
1341 | |||
1342 | mvs_hba_cq_dump(mvi); | ||
1343 | |||
1344 | if (unlikely(rx_desc & RXQ_DONE)) | ||
1345 | mvs_slot_complete(mvi, rx_desc); | ||
1346 | if (rx_desc & RXQ_ATTN) { | ||
1347 | attn = true; | ||
1348 | dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", | ||
1349 | rx_desc); | ||
1350 | } else if (rx_desc & RXQ_ERR) { | ||
1351 | dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", | ||
1352 | rx_desc); | ||
1353 | } | ||
1354 | } | ||
1355 | |||
1356 | if (attn && self_clear) | ||
1357 | mvs_int_full(mvi); | ||
1358 | |||
1359 | return 0; | ||
1360 | } | ||
1361 | |||
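| /* Main (legacy/INTx) interrupt handler.  A global status of zero | ||
|  * means the interrupt is not ours; all-ones usually means the device | ||
|  * is gone or not responding, so both cases return IRQ_NONE. | ||
|  */ | ||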
1362 | static irqreturn_t mvs_interrupt(int irq, void *opaque) | ||
1363 | { | ||
1364 | struct mvs_info *mvi = opaque; | ||
1365 | void __iomem *regs = mvi->regs; | ||
1366 | u32 stat; | ||
1367 | |||
1368 | stat = mr32(GBL_INT_STAT); | ||
1369 | |||
1370 | /* clear CMD_CMPLT ASAP */ | ||
1371 | mw32_f(INT_STAT, CINT_DONE); | ||
1372 | |||
1373 | if (stat == 0 || stat == 0xffffffff) | ||
1374 | return IRQ_NONE; | ||
1375 | |||
1376 | spin_lock(&mvi->lock); | ||
1377 | |||
1378 | mvs_int_full(mvi); | ||
1379 | |||
1380 | spin_unlock(&mvi->lock); | ||
1381 | |||
1382 | return IRQ_HANDLED; | ||
1383 | } | ||
1384 | |||
1385 | #ifndef MVS_DISABLE_MSI | ||
1386 | static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) | ||
1387 | { | ||
1388 | struct mvs_info *mvi = opaque; | ||
1389 | |||
1390 | spin_lock(&mvi->lock); | ||
1391 | |||
1392 | mvs_int_rx(mvi, true); | ||
1393 | |||
1394 | spin_unlock(&mvi->lock); | ||
1395 | |||
1396 | return IRQ_HANDLED; | ||
1397 | } | ||
1398 | #endif | ||
1399 | |||
1400 | struct mvs_task_exec_info { | ||
1401 | struct sas_task *task; | ||
1402 | struct mvs_cmd_hdr *hdr; | ||
1403 | struct mvs_port *port; | ||
1404 | u32 tag; | ||
1405 | int n_elem; | ||
1406 | }; | ||
1407 | |||
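| /* | ||
|  * The three prep routines below share one pattern: carve the slot's | ||
|  * DMA buffer into command table, open address frame, PRD table and | ||
|  * status buffer regions, point the command header at them, build the | ||
|  * protocol-specific frame, and stage a delivery queue entry that | ||
|  * mvs_task_exec() later kicks via TX_PROD_IDX. | ||
|  */ | ||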
1408 | static int mvs_task_prep_smp(struct mvs_info *mvi, | ||
1409 | struct mvs_task_exec_info *tei) | ||
1410 | { | ||
1411 | int elem, rc, i; | ||
1412 | struct sas_task *task = tei->task; | ||
1413 | struct mvs_cmd_hdr *hdr = tei->hdr; | ||
1414 | struct scatterlist *sg_req, *sg_resp; | ||
1415 | u32 req_len, resp_len, tag = tei->tag; | ||
1416 | void *buf_tmp; | ||
1417 | u8 *buf_oaf; | ||
1418 | dma_addr_t buf_tmp_dma; | ||
1419 | struct mvs_prd *buf_prd; | ||
1420 | struct scatterlist *sg; | ||
1421 | struct mvs_slot_info *slot = &mvi->slot_info[tag]; | ||
1422 | struct asd_sas_port *sas_port = task->dev->port; | ||
1423 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
1424 | #if _MV_DUMP | ||
1425 | u8 *buf_cmd; | ||
1426 | void *from; | ||
1427 | #endif | ||
1428 | /* | ||
1429 | * DMA-map SMP request, response buffers | ||
1430 | */ | ||
1431 | sg_req = &task->smp_task.smp_req; | ||
1432 | elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); | ||
1433 | if (!elem) | ||
1434 | return -ENOMEM; | ||
1435 | req_len = sg_dma_len(sg_req); | ||
1436 | |||
1437 | sg_resp = &task->smp_task.smp_resp; | ||
1438 | elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); | ||
1439 | if (!elem) { | ||
1440 | rc = -ENOMEM; | ||
1441 | goto err_out; | ||
1442 | } | ||
1443 | resp_len = sg_dma_len(sg_resp); | ||
1444 | |||
1445 | /* must be in dwords */ | ||
1446 | if ((req_len & 0x3) || (resp_len & 0x3)) { | ||
1447 | rc = -EINVAL; | ||
1448 | goto err_out_2; | ||
1449 | } | ||
1450 | |||
1451 | /* | ||
1452 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | ||
1453 | */ | ||
1454 | |||
1455 | /* region 1: command table area (the SMP request frame) ************ */ | ||
1456 | buf_tmp = slot->buf; | ||
1457 | buf_tmp_dma = slot->buf_dma; | ||
1458 | |||
1459 | #if _MV_DUMP | ||
1460 | buf_cmd = buf_tmp; | ||
1461 | hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1462 | buf_tmp += req_len; | ||
1463 | buf_tmp_dma += req_len; | ||
1464 | slot->cmd_size = req_len; | ||
1465 | #else | ||
1466 | hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); | ||
1467 | #endif | ||
1468 | |||
1469 | /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ | ||
1470 | buf_oaf = buf_tmp; | ||
1471 | hdr->open_frame = cpu_to_le64(buf_tmp_dma); | ||
1472 | |||
1473 | buf_tmp += MVS_OAF_SZ; | ||
1474 | buf_tmp_dma += MVS_OAF_SZ; | ||
1475 | |||
1476 | /* region 3: PRD table ********************************************* */ | ||
1477 | buf_prd = buf_tmp; | ||
1478 | if (tei->n_elem) | ||
1479 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1480 | else | ||
1481 | hdr->prd_tbl = 0; | ||
1482 | |||
1483 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
1484 | buf_tmp += i; | ||
1485 | buf_tmp_dma += i; | ||
1486 | |||
1487 | /* region 4: status buffer (the larger the PRD, the smaller this buf) */ | ||
1488 | slot->response = buf_tmp; | ||
1489 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | ||
1490 | |||
1491 | /* | ||
1492 | * Fill in TX ring and command slot header | ||
1493 | */ | ||
1494 | slot->tx = mvi->tx_prod; | ||
1495 | mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | | ||
1496 | TXQ_MODE_I | tag | | ||
1497 | (sas_port->phy_mask << TXQ_PHY_SHIFT)); | ||
1498 | |||
1499 | hdr->flags |= flags; | ||
1500 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); | ||
1501 | hdr->tags = cpu_to_le32(tag); | ||
1502 | hdr->data_len = 0; | ||
1503 | |||
1504 | /* generate open address frame hdr (first 12 bytes) */ | ||
1505 | buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ | ||
1506 | buf_oaf[1] = task->dev->linkrate & 0xf; | ||
1507 | *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ | ||
1508 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | ||
1509 | |||
1510 | /* fill in PRD (scatter/gather) table, if any */ | ||
1511 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | ||
1512 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
1513 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
1514 | buf_prd++; | ||
1515 | } | ||
1516 | |||
1517 | #if _MV_DUMP | ||
1518 | /* copy cmd table */ | ||
1519 | from = kmap_atomic(sg_page(sg_req), KM_IRQ0); | ||
1520 | memcpy(buf_cmd, from + sg_req->offset, req_len); | ||
1521 | kunmap_atomic(from, KM_IRQ0); | ||
1522 | #endif | ||
1523 | return 0; | ||
1524 | |||
1525 | err_out_2: | ||
1526 | pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, | ||
1527 | PCI_DMA_FROMDEVICE); | ||
1528 | err_out: | ||
1529 | pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, | ||
1530 | PCI_DMA_TODEVICE); | ||
1531 | return rc; | ||
1532 | } | ||
1533 | |||
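| /* | ||
|  * SATA register set (SRS) management: each STP/SATA port is given one | ||
|  * of the chip's register sets (port->taskfileset).  Sets 0-15 are | ||
|  * enabled through PCS, higher ones through CTL; assignment finds a | ||
|  * free bit and clears any stale SRS interrupt, freeing clears it. | ||
|  */ | ||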
1534 | static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) | ||
1535 | { | ||
1536 | void __iomem *regs = mvi->regs; | ||
1537 | u32 tmp, offs; | ||
1538 | u8 *tfs = &port->taskfileset; | ||
1539 | |||
1540 | if (*tfs == MVS_ID_NOT_MAPPED) | ||
1541 | return; | ||
1542 | |||
1543 | offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); | ||
1544 | if (*tfs < 16) { | ||
1545 | tmp = mr32(PCS); | ||
1546 | mw32(PCS, tmp & ~offs); | ||
1547 | } else { | ||
1548 | tmp = mr32(CTL); | ||
1549 | mw32(CTL, tmp & ~offs); | ||
1550 | } | ||
1551 | |||
1552 | tmp = mr32(INT_STAT_SRS) & (1U << *tfs); | ||
1553 | if (tmp) | ||
1554 | mw32(INT_STAT_SRS, tmp); | ||
1555 | |||
1556 | *tfs = MVS_ID_NOT_MAPPED; | ||
1557 | } | ||
1558 | |||
1559 | static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) | ||
1560 | { | ||
1561 | int i; | ||
1562 | u32 tmp, offs; | ||
1563 | void __iomem *regs = mvi->regs; | ||
1564 | |||
1565 | if (port->taskfileset != MVS_ID_NOT_MAPPED) | ||
1566 | return 0; | ||
1567 | |||
1568 | tmp = mr32(PCS); | ||
1569 | |||
1570 | for (i = 0; i < mvi->chip->srs_sz; i++) { | ||
1571 | if (i == 16) | ||
1572 | tmp = mr32(CTL); | ||
1573 | offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); | ||
1574 | if (!(tmp & offs)) { | ||
1575 | port->taskfileset = i; | ||
1576 | |||
1577 | if (i < 16) | ||
1578 | mw32(PCS, tmp | offs); | ||
1579 | else | ||
1580 | mw32(CTL, tmp | offs); | ||
1581 | tmp = mr32(INT_STAT_SRS) & (1U << i); | ||
1582 | if (tmp) | ||
1583 | mw32(INT_STAT_SRS, tmp); | ||
1584 | return 0; | ||
1585 | } | ||
1586 | } | ||
1587 | return MVS_ID_NOT_MAPPED; | ||
1588 | } | ||
1589 | |||
1590 | static u32 mvs_get_ncq_tag(struct sas_task *task) | ||
1591 | { | ||
1592 | u32 tag = 0; | ||
1593 | struct ata_queued_cmd *qc = task->uldd_task; | ||
1594 | |||
1595 | if (qc) | ||
1596 | tag = qc->tag; | ||
1597 | |||
1598 | return tag; | ||
1599 | } | ||
1600 | |||
1601 | static int mvs_task_prep_ata(struct mvs_info *mvi, | ||
1602 | struct mvs_task_exec_info *tei) | ||
1603 | { | ||
1604 | struct sas_task *task = tei->task; | ||
1605 | struct domain_device *dev = task->dev; | ||
1606 | struct mvs_cmd_hdr *hdr = tei->hdr; | ||
1607 | struct asd_sas_port *sas_port = dev->port; | ||
1608 | struct mvs_slot_info *slot; | ||
1609 | struct scatterlist *sg; | ||
1610 | struct mvs_prd *buf_prd; | ||
1611 | struct mvs_port *port = tei->port; | ||
1612 | u32 tag = tei->tag; | ||
1613 | u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); | ||
1614 | void *buf_tmp; | ||
1615 | u8 *buf_cmd, *buf_oaf; | ||
1616 | dma_addr_t buf_tmp_dma; | ||
1617 | u32 i, req_len, resp_len; | ||
1618 | const u32 max_resp_len = SB_RFB_MAX; | ||
1619 | |||
1620 | if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) | ||
1621 | return -EBUSY; | ||
1622 | |||
1623 | slot = &mvi->slot_info[tag]; | ||
1624 | slot->tx = mvi->tx_prod; | ||
1625 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | | ||
1626 | (TXQ_CMD_STP << TXQ_CMD_SHIFT) | | ||
1627 | (sas_port->phy_mask << TXQ_PHY_SHIFT) | | ||
1628 | (port->taskfileset << TXQ_SRS_SHIFT)); | ||
1629 | |||
1630 | if (task->ata_task.use_ncq) | ||
1631 | flags |= MCH_FPDMA; | ||
1632 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { | ||
1633 | if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) | ||
1634 | flags |= MCH_ATAPI; | ||
1635 | } | ||
1636 | |||
1637 | /* FIXME: fill in port multiplier number */ | ||
1638 | |||
1639 | hdr->flags = cpu_to_le32(flags); | ||
1640 | |||
1641 | /* FIXME: use the low-order 5 bits for the TAG when NCQ is enabled */ | ||
1642 | if (task->ata_task.use_ncq) { | ||
1643 | hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task)); | ||
1644 | /*Fill in task file */ | ||
1645 | task->ata_task.fis.sector_count = hdr->tags << 3; | ||
1646 | } else | ||
1647 | hdr->tags = cpu_to_le32(tag); | ||
1648 | hdr->data_len = cpu_to_le32(task->total_xfer_len); | ||
1649 | |||
1650 | /* | ||
1651 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | ||
1652 | */ | ||
1653 | |||
1654 | /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ | ||
1655 | buf_cmd = buf_tmp = slot->buf; | ||
1656 | buf_tmp_dma = slot->buf_dma; | ||
1657 | |||
1658 | hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1659 | |||
1660 | buf_tmp += MVS_ATA_CMD_SZ; | ||
1661 | buf_tmp_dma += MVS_ATA_CMD_SZ; | ||
1662 | #if _MV_DUMP | ||
1663 | slot->cmd_size = MVS_ATA_CMD_SZ; | ||
1664 | #endif | ||
1665 | |||
1666 | /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ | ||
1667 | /* used for STP. unused for SATA? */ | ||
1668 | buf_oaf = buf_tmp; | ||
1669 | hdr->open_frame = cpu_to_le64(buf_tmp_dma); | ||
1670 | |||
1671 | buf_tmp += MVS_OAF_SZ; | ||
1672 | buf_tmp_dma += MVS_OAF_SZ; | ||
1673 | |||
1674 | /* region 3: PRD table ********************************************* */ | ||
1675 | buf_prd = buf_tmp; | ||
1676 | if (tei->n_elem) | ||
1677 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1678 | else | ||
1679 | hdr->prd_tbl = 0; | ||
1680 | |||
1681 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
1682 | buf_tmp += i; | ||
1683 | buf_tmp_dma += i; | ||
1684 | |||
1685 | /* region 4: status buffer (the larger the PRD, the smaller this buf) */ | ||
1686 | /* FIXME: probably unused, for SATA. kept here just in case | ||
1687 | * we get a STP/SATA error information record | ||
1688 | */ | ||
1689 | slot->response = buf_tmp; | ||
1690 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | ||
1691 | |||
1692 | req_len = sizeof(struct host_to_dev_fis); | ||
1693 | resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - | ||
1694 | sizeof(struct mvs_err_info) - i; | ||
1695 | |||
1696 | /* request, response lengths */ | ||
1697 | resp_len = min(resp_len, max_resp_len); | ||
1698 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); | ||
1699 | |||
1700 | task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ | ||
1701 | /* fill in command FIS and ATAPI CDB */ | ||
1702 | memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); | ||
1703 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) | ||
1704 | memcpy(buf_cmd + STP_ATAPI_CMD, | ||
1705 | task->ata_task.atapi_packet, 16); | ||
1706 | |||
1707 | /* generate open address frame hdr (first 12 bytes) */ | ||
1708 | buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ | ||
1709 | buf_oaf[1] = task->dev->linkrate & 0xf; | ||
1710 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); | ||
1711 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | ||
1712 | |||
1713 | /* fill in PRD (scatter/gather) table, if any */ | ||
1714 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | ||
1715 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
1716 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
1717 | buf_prd++; | ||
1718 | } | ||
1719 | |||
1720 | return 0; | ||
1721 | } | ||
1722 | |||
1723 | static int mvs_task_prep_ssp(struct mvs_info *mvi, | ||
1724 | struct mvs_task_exec_info *tei) | ||
1725 | { | ||
1726 | struct sas_task *task = tei->task; | ||
1727 | struct mvs_cmd_hdr *hdr = tei->hdr; | ||
1728 | struct mvs_port *port = tei->port; | ||
1729 | struct mvs_slot_info *slot; | ||
1730 | struct scatterlist *sg; | ||
1731 | struct mvs_prd *buf_prd; | ||
1732 | struct ssp_frame_hdr *ssp_hdr; | ||
1733 | void *buf_tmp; | ||
1734 | u8 *buf_cmd, *buf_oaf, fburst = 0; | ||
1735 | dma_addr_t buf_tmp_dma; | ||
1736 | u32 flags; | ||
1737 | u32 resp_len, req_len, i, tag = tei->tag; | ||
1738 | const u32 max_resp_len = SB_RFB_MAX; | ||
1739 | |||
1740 | slot = &mvi->slot_info[tag]; | ||
1741 | |||
1742 | slot->tx = mvi->tx_prod; | ||
1743 | mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | | ||
1744 | (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | | ||
1745 | (port->wide_port_phymap << TXQ_PHY_SHIFT)); | ||
1746 | |||
1747 | flags = MCH_RETRY; | ||
1748 | if (task->ssp_task.enable_first_burst) { | ||
1749 | flags |= MCH_FBURST; | ||
1750 | fburst = (1 << 7); | ||
1751 | } | ||
1752 | hdr->flags = cpu_to_le32(flags | | ||
1753 | (tei->n_elem << MCH_PRD_LEN_SHIFT) | | ||
1754 | (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); | ||
1755 | |||
1756 | hdr->tags = cpu_to_le32(tag); | ||
1757 | hdr->data_len = cpu_to_le32(task->total_xfer_len); | ||
1758 | |||
1759 | /* | ||
1760 | * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs | ||
1761 | */ | ||
1762 | |||
1763 | /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ | ||
1764 | buf_cmd = buf_tmp = slot->buf; | ||
1765 | buf_tmp_dma = slot->buf_dma; | ||
1766 | |||
1767 | hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1768 | |||
1769 | buf_tmp += MVS_SSP_CMD_SZ; | ||
1770 | buf_tmp_dma += MVS_SSP_CMD_SZ; | ||
1771 | #if _MV_DUMP | ||
1772 | slot->cmd_size = MVS_SSP_CMD_SZ; | ||
1773 | #endif | ||
1774 | |||
1775 | /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ | ||
1776 | buf_oaf = buf_tmp; | ||
1777 | hdr->open_frame = cpu_to_le64(buf_tmp_dma); | ||
1778 | |||
1779 | buf_tmp += MVS_OAF_SZ; | ||
1780 | buf_tmp_dma += MVS_OAF_SZ; | ||
1781 | |||
1782 | /* region 3: PRD table ********************************************* */ | ||
1783 | buf_prd = buf_tmp; | ||
1784 | if (tei->n_elem) | ||
1785 | hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); | ||
1786 | else | ||
1787 | hdr->prd_tbl = 0; | ||
1788 | |||
1789 | i = sizeof(struct mvs_prd) * tei->n_elem; | ||
1790 | buf_tmp += i; | ||
1791 | buf_tmp_dma += i; | ||
1792 | |||
1793 | /* region 4: status buffer (the larger the PRD, the smaller this buf) */ | ||
1794 | slot->response = buf_tmp; | ||
1795 | hdr->status_buf = cpu_to_le64(buf_tmp_dma); | ||
1796 | |||
1797 | resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - | ||
1798 | sizeof(struct mvs_err_info) - i; | ||
1799 | resp_len = min(resp_len, max_resp_len); | ||
1800 | |||
1801 | req_len = sizeof(struct ssp_frame_hdr) + 28; | ||
1802 | |||
1803 | /* request, response lengths */ | ||
1804 | hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); | ||
1805 | |||
1806 | /* generate open address frame hdr (first 12 bytes) */ | ||
1807 | buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ | ||
1808 | buf_oaf[1] = task->dev->linkrate & 0xf; | ||
1809 | *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); | ||
1810 | memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); | ||
1811 | |||
1812 | /* fill in SSP frame header (Command Table.SSP frame header) */ | ||
1813 | ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; | ||
1814 | ssp_hdr->frame_type = SSP_COMMAND; | ||
1815 | memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, | ||
1816 | HASHED_SAS_ADDR_SIZE); | ||
1817 | memcpy(ssp_hdr->hashed_src_addr, | ||
1818 | task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); | ||
1819 | ssp_hdr->tag = cpu_to_be16(tag); | ||
1820 | |||
1821 | /* fill in command frame IU */ | ||
1822 | buf_cmd += sizeof(*ssp_hdr); | ||
1823 | memcpy(buf_cmd, &task->ssp_task.LUN, 8); | ||
1824 | buf_cmd[9] = fburst | task->ssp_task.task_attr | | ||
1825 | (task->ssp_task.task_prio << 3); | ||
1826 | memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); | ||
1827 | |||
1828 | /* fill in PRD (scatter/gather) table, if any */ | ||
1829 | for_each_sg(task->scatter, sg, tei->n_elem, i) { | ||
1830 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); | ||
1831 | buf_prd->len = cpu_to_le32(sg_dma_len(sg)); | ||
1832 | buf_prd++; | ||
1833 | } | ||
1834 | |||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
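| /* | ||
|  * Queue one or more sas_tasks: for each task, map its scatterlist, | ||
|  * allocate a slot tag, build the command with the per-protocol prep | ||
|  * routine, then advance TX_PROD_IDX so the chip starts fetching the | ||
|  * new delivery queue entries. | ||
|  */ | ||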
1838 | static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) | ||
1839 | { | ||
1840 | struct domain_device *dev = task->dev; | ||
1841 | struct mvs_info *mvi = dev->port->ha->lldd_ha; | ||
1842 | struct pci_dev *pdev = mvi->pdev; | ||
1843 | void __iomem *regs = mvi->regs; | ||
1844 | struct mvs_task_exec_info tei; | ||
1845 | struct sas_task *t = task; | ||
1846 | u32 tag = 0xdeadbeef, rc, n_elem = 0; | ||
1847 | unsigned long flags; | ||
1848 | u32 n = num, pass = 0; | ||
1849 | |||
1850 | spin_lock_irqsave(&mvi->lock, flags); | ||
1851 | |||
1852 | do { | ||
1853 | tei.port = &mvi->port[dev->port->id]; | ||
1854 | |||
1855 | if (!tei.port->port_attached) { | ||
1856 | struct task_status_struct *ts = &t->task_status; | ||
1857 | ts->stat = SAS_PHY_DOWN; | ||
1858 | t->task_done(t); | ||
1859 | rc = 0; | ||
1860 | goto exec_exit; | ||
1861 | } | ||
1862 | if (!sas_protocol_ata(t->task_proto)) { | ||
1863 | if (t->num_scatter) { | ||
1864 | n_elem = pci_map_sg(mvi->pdev, t->scatter, | ||
1865 | t->num_scatter, | ||
1866 | t->data_dir); | ||
1867 | if (!n_elem) { | ||
1868 | rc = -ENOMEM; | ||
1869 | goto err_out; | ||
1870 | } | ||
1871 | } | ||
1872 | } else { | ||
1873 | n_elem = t->num_scatter; | ||
1874 | } | ||
1875 | |||
1876 | rc = mvs_tag_alloc(mvi, &tag); | ||
1877 | if (rc) | ||
1878 | goto err_out; | ||
1879 | |||
1880 | mvi->slot_info[tag].task = t; | ||
1881 | mvi->slot_info[tag].n_elem = n_elem; | ||
1882 | memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ); | ||
1883 | tei.task = t; | ||
1884 | tei.hdr = &mvi->slot[tag]; | ||
1885 | tei.tag = tag; | ||
1886 | tei.n_elem = n_elem; | ||
1887 | |||
1888 | switch (t->task_proto) { | ||
1889 | case SAS_PROTOCOL_SMP: | ||
1890 | rc = mvs_task_prep_smp(mvi, &tei); | ||
1891 | break; | ||
1892 | case SAS_PROTOCOL_SSP: | ||
1893 | rc = mvs_task_prep_ssp(mvi, &tei); | ||
1894 | break; | ||
1895 | case SAS_PROTOCOL_SATA: | ||
1896 | case SAS_PROTOCOL_STP: | ||
1897 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | ||
1898 | rc = mvs_task_prep_ata(mvi, &tei); | ||
1899 | break; | ||
1900 | default: | ||
1901 | dev_printk(KERN_ERR, &pdev->dev, | ||
1902 | "unknown sas_task proto: 0x%x\n", | ||
1903 | t->task_proto); | ||
1904 | rc = -EINVAL; | ||
1905 | break; | ||
1906 | } | ||
1907 | |||
1908 | if (rc) | ||
1909 | goto err_out_tag; | ||
1910 | |||
1911 | /* TODO: select normal or high priority */ | ||
1912 | |||
1913 | spin_lock(&t->task_state_lock); | ||
1914 | t->task_state_flags |= SAS_TASK_AT_INITIATOR; | ||
1915 | spin_unlock(&t->task_state_lock); | ||
1916 | |||
1917 | if (n == 1) { | ||
1918 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1919 | mw32(TX_PROD_IDX, mvi->tx_prod); | ||
1920 | } | ||
1921 | mvs_hba_memory_dump(mvi, tag, t->task_proto); | ||
1922 | |||
1923 | ++pass; | ||
1924 | mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); | ||
1925 | |||
1926 | if (n == 1) | ||
1927 | break; | ||
1928 | |||
1929 | t = list_entry(t->list.next, struct sas_task, list); | ||
1930 | } while (--n); | ||
1931 | |||
1932 | return 0; | ||
1933 | |||
1934 | err_out_tag: | ||
1935 | mvs_tag_free(mvi, tag); | ||
1936 | err_out: | ||
1937 | dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); | ||
1938 | if (!sas_protocol_ata(t->task_proto)) | ||
1939 | if (n_elem) | ||
1940 | pci_unmap_sg(mvi->pdev, t->scatter, n_elem, | ||
1941 | t->data_dir); | ||
1942 | exec_exit: | ||
1943 | if (pass) | ||
1944 | mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); | ||
1945 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
1946 | return rc; | ||
1947 | } | ||
1948 | |||
1949 | static int mvs_task_abort(struct sas_task *task) | ||
1950 | { | ||
1951 | int rc = 1; | ||
1952 | unsigned long flags; | ||
1953 | struct mvs_info *mvi = task->dev->port->ha->lldd_ha; | ||
1954 | struct pci_dev *pdev = mvi->pdev; | ||
1955 | |||
1956 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
1957 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | ||
1958 | rc = TMF_RESP_FUNC_COMPLETE; | ||
1959 | goto out_done; | ||
1960 | } | ||
1961 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
1962 | |||
1963 | /*FIXME*/ | ||
1964 | rc = TMF_RESP_FUNC_COMPLETE; | ||
1965 | |||
1966 | switch (task->task_proto) { | ||
1967 | case SAS_PROTOCOL_SMP: | ||
1968 | dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! "); | ||
1969 | break; | ||
1970 | case SAS_PROTOCOL_SSP: | ||
1971 | dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! "); | ||
1972 | break; | ||
1973 | case SAS_PROTOCOL_SATA: | ||
1974 | case SAS_PROTOCOL_STP: | ||
1975 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ | ||
1976 | dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! " | ||
1977 | "Dump D2H FIS: \n"); | ||
1978 | mvs_hexdump(sizeof(struct host_to_dev_fis), | ||
1979 | (void *)&task->ata_task.fis, 0); | ||
1980 | dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); | ||
1981 | mvs_hexdump(16, task->ata_task.atapi_packet, 0); | ||
1982 | break; | ||
1983 | } | ||
1984 | default: | ||
1985 | break; | ||
1986 | } | ||
1987 | out_done: | ||
1988 | return rc; | ||
1989 | } | ||
1990 | |||
1991 | static void mvs_free(struct mvs_info *mvi) | ||
1992 | { | ||
1993 | int i; | ||
1994 | |||
1995 | if (!mvi) | ||
1996 | return; | ||
1997 | |||
1998 | for (i = 0; i < MVS_SLOTS; i++) { | ||
1999 | struct mvs_slot_info *slot = &mvi->slot_info[i]; | ||
2000 | |||
2001 | if (slot->buf) | ||
2002 | dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, | ||
2003 | slot->buf, slot->buf_dma); | ||
2004 | } | ||
2005 | |||
2006 | if (mvi->tx) | ||
2007 | dma_free_coherent(&mvi->pdev->dev, | ||
2008 | sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, | ||
2009 | mvi->tx, mvi->tx_dma); | ||
2010 | if (mvi->rx_fis) | ||
2011 | dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, | ||
2012 | mvi->rx_fis, mvi->rx_fis_dma); | ||
2013 | if (mvi->rx) | ||
2014 | dma_free_coherent(&mvi->pdev->dev, | ||
2015 | sizeof(*mvi->rx) * MVS_RX_RING_SZ, | ||
2016 | mvi->rx, mvi->rx_dma); | ||
2017 | if (mvi->slot) | ||
2018 | dma_free_coherent(&mvi->pdev->dev, | ||
2019 | sizeof(*mvi->slot) * MVS_SLOTS, | ||
2020 | mvi->slot, mvi->slot_dma); | ||
2021 | #ifdef MVS_ENABLE_PERI | ||
2022 | if (mvi->peri_regs) | ||
2023 | iounmap(mvi->peri_regs); | ||
2024 | #endif | ||
2025 | if (mvi->regs) | ||
2026 | iounmap(mvi->regs); | ||
2027 | if (mvi->shost) | ||
2028 | scsi_host_put(mvi->shost); | ||
2029 | kfree(mvi->sas.sas_port); | ||
2030 | kfree(mvi->sas.sas_phy); | ||
2031 | kfree(mvi); | ||
2032 | } | ||
2033 | |||
2034 | /* FIXME: locking? */ | ||
2035 | static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, | ||
2036 | void *funcdata) | ||
2037 | { | ||
2038 | struct mvs_info *mvi = sas_phy->ha->lldd_ha; | ||
2039 | int rc = 0, phy_id = sas_phy->id; | ||
2040 | u32 tmp; | ||
2041 | |||
2042 | tmp = mvs_read_phy_ctl(mvi, phy_id); | ||
2043 | |||
2044 | switch (func) { | ||
2045 | case PHY_FUNC_SET_LINK_RATE:{ | ||
2046 | struct sas_phy_linkrates *rates = funcdata; | ||
2047 | u32 lrmin = 0, lrmax = 0; | ||
2048 | |||
2049 | lrmin = (rates->minimum_linkrate << 8); | ||
2050 | lrmax = (rates->maximum_linkrate << 12); | ||
2051 | |||
2052 | if (lrmin) { | ||
2053 | tmp &= ~(0xf << 8); | ||
2054 | tmp |= lrmin; | ||
2055 | } | ||
2056 | if (lrmax) { | ||
2057 | tmp &= ~(0xf << 12); | ||
2058 | tmp |= lrmax; | ||
2059 | } | ||
2060 | mvs_write_phy_ctl(mvi, phy_id, tmp); | ||
2061 | break; | ||
2062 | } | ||
2063 | |||
2064 | case PHY_FUNC_HARD_RESET: | ||
2065 | if (tmp & PHY_RST_HARD) | ||
2066 | break; | ||
2067 | mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); | ||
2068 | break; | ||
2069 | |||
2070 | case PHY_FUNC_LINK_RESET: | ||
2071 | mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); | ||
2072 | break; | ||
2073 | |||
2074 | case PHY_FUNC_DISABLE: | ||
2075 | case PHY_FUNC_RELEASE_SPINUP_HOLD: | ||
2076 | default: | ||
2077 | rc = -EOPNOTSUPP; | ||
2078 | } | ||
2079 | |||
2080 | return rc; | ||
2081 | } | ||
2082 | |||
2083 | static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) | ||
2084 | { | ||
2085 | struct mvs_phy *phy = &mvi->phy[phy_id]; | ||
2086 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | ||
2087 | |||
2088 | sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; | ||
2089 | sas_phy->class = SAS; | ||
2090 | sas_phy->iproto = SAS_PROTOCOL_ALL; | ||
2091 | sas_phy->tproto = 0; | ||
2092 | sas_phy->type = PHY_TYPE_PHYSICAL; | ||
2093 | sas_phy->role = PHY_ROLE_INITIATOR; | ||
2094 | sas_phy->oob_mode = OOB_NOT_CONNECTED; | ||
2095 | sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; | ||
2096 | |||
2097 | sas_phy->id = phy_id; | ||
2098 | sas_phy->sas_addr = &mvi->sas_addr[0]; | ||
2099 | sas_phy->frame_rcvd = &phy->frame_rcvd[0]; | ||
2100 | sas_phy->ha = &mvi->sas; | ||
2101 | sas_phy->lldd_phy = phy; | ||
2102 | } | ||
2103 | |||
2104 | static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, | ||
2105 | const struct pci_device_id *ent) | ||
2106 | { | ||
2107 | struct mvs_info *mvi; | ||
2108 | unsigned long res_start, res_len, res_flag; | ||
2109 | struct asd_sas_phy **arr_phy; | ||
2110 | struct asd_sas_port **arr_port; | ||
2111 | const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; | ||
2112 | int i; | ||
2113 | |||
2114 | /* | ||
2115 | * alloc and init our per-HBA mvs_info struct | ||
2116 | */ | ||
2117 | |||
2118 | mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); | ||
2119 | if (!mvi) | ||
2120 | return NULL; | ||
2121 | |||
2122 | spin_lock_init(&mvi->lock); | ||
2123 | mvi->pdev = pdev; | ||
2124 | mvi->chip = chip; | ||
2125 | |||
2126 | if (pdev->device == 0x6440 && pdev->revision == 0) | ||
2127 | mvi->flags |= MVF_PHY_PWR_FIX; | ||
2128 | |||
2129 | /* | ||
2130 | * alloc and init SCSI, SAS glue | ||
2131 | */ | ||
2132 | |||
2133 | mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); | ||
2134 | if (!mvi->shost) | ||
2135 | goto err_out; | ||
2136 | |||
2137 | arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); | ||
2138 | arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); | ||
2139 | if (!arr_phy || !arr_port) | ||
2140 | goto err_out; | ||
2141 | |||
2142 | for (i = 0; i < MVS_MAX_PHYS; i++) { | ||
2143 | mvs_phy_init(mvi, i); | ||
2144 | arr_phy[i] = &mvi->phy[i].sas_phy; | ||
2145 | arr_port[i] = &mvi->port[i].sas_port; | ||
2146 | } | ||
2147 | |||
2148 | SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; | ||
2149 | mvi->shost->transportt = mvs_stt; | ||
2150 | mvi->shost->max_id = 21; | ||
2151 | mvi->shost->max_lun = ~0; | ||
2152 | mvi->shost->max_channel = 0; | ||
2153 | mvi->shost->max_cmd_len = 16; | ||
2154 | |||
2155 | mvi->sas.sas_ha_name = DRV_NAME; | ||
2156 | mvi->sas.dev = &pdev->dev; | ||
2157 | mvi->sas.lldd_module = THIS_MODULE; | ||
2158 | mvi->sas.sas_addr = &mvi->sas_addr[0]; | ||
2159 | mvi->sas.sas_phy = arr_phy; | ||
2160 | mvi->sas.sas_port = arr_port; | ||
2161 | mvi->sas.num_phys = chip->n_phy; | ||
2162 | mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1; | ||
2163 | mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; | ||
2164 | mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1; | ||
2165 | mvi->sas.lldd_ha = mvi; | ||
2166 | mvi->sas.core.shost = mvi->shost; | ||
2167 | |||
2168 | mvs_tag_init(mvi); | ||
2169 | |||
2170 | /* | ||
2171 | * ioremap main and peripheral registers | ||
2172 | */ | ||
2173 | |||
2174 | #ifdef MVS_ENABLE_PERI | ||
2175 | res_start = pci_resource_start(pdev, 2); | ||
2176 | res_len = pci_resource_len(pdev, 2); | ||
2177 | if (!res_start || !res_len) | ||
2178 | goto err_out; | ||
2179 | |||
2180 | mvi->peri_regs = ioremap_nocache(res_start, res_len); | ||
2181 | if (!mvi->peri_regs) | ||
2182 | goto err_out; | ||
2183 | #endif | ||
2184 | |||
2185 | res_start = pci_resource_start(pdev, 4); | ||
2186 | res_len = pci_resource_len(pdev, 4); | ||
2187 | if (!res_start || !res_len) | ||
2188 | goto err_out; | ||
2189 | |||
2190 | res_flag = pci_resource_flags(pdev, 4); | ||
2191 | if (res_flag & IORESOURCE_CACHEABLE) | ||
2192 | mvi->regs = ioremap(res_start, res_len); | ||
2193 | else | ||
2194 | mvi->regs = ioremap_nocache(res_start, res_len); | ||
2195 | |||
2196 | if (!mvi->regs) | ||
2197 | goto err_out; | ||
2198 | |||
2199 | /* | ||
2200 | * alloc and init our DMA areas | ||
2201 | */ | ||
2202 | |||
2203 | mvi->tx = dma_alloc_coherent(&pdev->dev, | ||
2204 | sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, | ||
2205 | &mvi->tx_dma, GFP_KERNEL); | ||
2206 | if (!mvi->tx) | ||
2207 | goto err_out; | ||
2208 | memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); | ||
2209 | |||
2210 | mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, | ||
2211 | &mvi->rx_fis_dma, GFP_KERNEL); | ||
2212 | if (!mvi->rx_fis) | ||
2213 | goto err_out; | ||
2214 | memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); | ||
2215 | |||
2216 | mvi->rx = dma_alloc_coherent(&pdev->dev, | ||
2217 | sizeof(*mvi->rx) * MVS_RX_RING_SZ, | ||
2218 | &mvi->rx_dma, GFP_KERNEL); | ||
2219 | if (!mvi->rx) | ||
2220 | goto err_out; | ||
2221 | memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ); | ||
2222 | |||
2223 | mvi->rx[0] = cpu_to_le32(0xfff); | ||
2224 | mvi->rx_cons = 0xfff; | ||
2225 | |||
2226 | mvi->slot = dma_alloc_coherent(&pdev->dev, | ||
2227 | sizeof(*mvi->slot) * MVS_SLOTS, | ||
2228 | &mvi->slot_dma, GFP_KERNEL); | ||
2229 | if (!mvi->slot) | ||
2230 | goto err_out; | ||
2231 | memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); | ||
2232 | |||
2233 | for (i = 0; i < MVS_SLOTS; i++) { | ||
2234 | struct mvs_slot_info *slot = &mvi->slot_info[i]; | ||
2235 | |||
2236 | slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, | ||
2237 | &slot->buf_dma, GFP_KERNEL); | ||
2238 | if (!slot->buf) | ||
2239 | goto err_out; | ||
2240 | memset(slot->buf, 0, MVS_SLOT_BUF_SZ); | ||
2241 | } | ||
2242 | |||
2243 | /* finally, read NVRAM to get our SAS address */ | ||
2244 | if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) | ||
2245 | goto err_out; | ||
2246 | return mvi; | ||
2247 | |||
2248 | err_out: | ||
2249 | mvs_free(mvi); | ||
2250 | return NULL; | ||
2251 | } | ||
2252 | |||
2253 | static u32 mvs_cr32(void __iomem *regs, u32 addr) | ||
2254 | { | ||
2255 | mw32(CMD_ADDR, addr); | ||
2256 | return mr32(CMD_DATA); | ||
2257 | } | ||
2258 | |||
2259 | static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) | ||
2260 | { | ||
2261 | mw32(CMD_ADDR, addr); | ||
2262 | mw32(CMD_DATA, val); | ||
2263 | } | ||
2264 | |||
2265 | static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) | ||
2266 | { | ||
2267 | void __iomem *regs = mvi->regs; | ||
2268 | return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) : | ||
2269 | mr32(P4_SER_CTLSTAT + (port - 4) * 4); | ||
2270 | } | ||
2271 | |||
2272 | static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) | ||
2273 | { | ||
2274 | void __iomem *regs = mvi->regs; | ||
2275 | if (port < 4) | ||
2276 | mw32(P0_SER_CTLSTAT + port * 4, val); | ||
2277 | else | ||
2278 | mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); | ||
2279 | } | ||
2280 | |||
2281 | static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) | ||
2282 | { | ||
2283 | void __iomem *regs = mvi->regs + off; | ||
2284 | void __iomem *regs2 = mvi->regs + off2; | ||
2285 | return (port < 4) ? readl(regs + port * 8) : | ||
2286 | readl(regs2 + (port - 4) * 8); | ||
2287 | } | ||
2288 | |||
2289 | static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, | ||
2290 | u32 port, u32 val) | ||
2291 | { | ||
2292 | void __iomem *regs = mvi->regs + off; | ||
2293 | void __iomem *regs2 = mvi->regs + off2; | ||
2294 | if (port < 4) | ||
2295 | writel(val, regs + port * 8); | ||
2296 | else | ||
2297 | writel(val, regs2 + (port - 4) * 8); | ||
2298 | } | ||
2299 | |||
2300 | static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) | ||
2301 | { | ||
2302 | return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); | ||
2303 | } | ||
2304 | |||
2305 | static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) | ||
2306 | { | ||
2307 | mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); | ||
2308 | } | ||
2309 | |||
2310 | static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) | ||
2311 | { | ||
2312 | mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); | ||
2313 | } | ||
2314 | |||
2315 | static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) | ||
2316 | { | ||
2317 | return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); | ||
2318 | } | ||
2319 | |||
2320 | static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) | ||
2321 | { | ||
2322 | mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); | ||
2323 | } | ||
2324 | |||
2325 | static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) | ||
2326 | { | ||
2327 | mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); | ||
2328 | } | ||
2329 | |||
2330 | static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) | ||
2331 | { | ||
2332 | return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); | ||
2333 | } | ||
2334 | |||
2335 | static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) | ||
2336 | { | ||
2337 | mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); | ||
2338 | } | ||
2339 | |||
2340 | static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) | ||
2341 | { | ||
2342 | return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); | ||
2343 | } | ||
2344 | |||
2345 | static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) | ||
2346 | { | ||
2347 | mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val); | ||
2348 | } | ||
2349 | |||
2350 | static void __devinit mvs_phy_hacks(struct mvs_info *mvi) | ||
2351 | { | ||
2352 | void __iomem *regs = mvi->regs; | ||
2353 | u32 tmp; | ||
2354 | |||
2355 | /* workaround for SATA R-ERR, to ignore phy glitch */ | ||
2356 | tmp = mvs_cr32(regs, CMD_PHY_TIMER); | ||
2357 | tmp &= ~(1 << 9); | ||
2358 | tmp |= (1 << 10); | ||
2359 | mvs_cw32(regs, CMD_PHY_TIMER, tmp); | ||
2360 | |||
2361 | /* enable retry 127 times */ | ||
2362 | mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); | ||
2363 | |||
2364 | /* extend open frame timeout to max */ | ||
2365 | tmp = mvs_cr32(regs, CMD_SAS_CTL0); | ||
2366 | tmp &= ~0xffff; | ||
2367 | tmp |= 0x3fff; | ||
2368 | mvs_cw32(regs, CMD_SAS_CTL0, tmp); | ||
2369 | |||
2370 | /* workaround for WDTIMEOUT, set to 550 ms */ | ||
2371 | mvs_cw32(regs, CMD_WD_TIMER, 0xffffff); | ||
2372 | |||
2373 | /* do not halt on a different port op during a wide port link change */ | ||
2374 | mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); | ||
2375 | |||
2376 | /* workaround for Seagate disk not-found OOB sequence: receive | ||
2377 | * COMINIT before sending out COMWAKE */ | ||
2378 | tmp = mvs_cr32(regs, CMD_PHY_MODE_21); | ||
2379 | tmp &= 0x0000ffff; | ||
2380 | tmp |= 0x00fa0000; | ||
2381 | mvs_cw32(regs, CMD_PHY_MODE_21, tmp); | ||
2382 | |||
2383 | tmp = mvs_cr32(regs, CMD_PHY_TIMER); | ||
2384 | tmp &= 0x1fffffff; | ||
2385 | tmp |= (2U << 29); /* 8 ms retry */ | ||
2386 | mvs_cw32(regs, CMD_PHY_TIMER, tmp); | ||
2387 | |||
2388 | /* TEST - for phy decoding error, adjust voltage levels */ | ||
2389 | mw32(P0_VSR_ADDR + 0, 0x8); | ||
2390 | mw32(P0_VSR_DATA + 0, 0x2F0); | ||
2391 | |||
2392 | mw32(P0_VSR_ADDR + 8, 0x8); | ||
2393 | mw32(P0_VSR_DATA + 8, 0x2F0); | ||
2394 | |||
2395 | mw32(P0_VSR_ADDR + 16, 0x8); | ||
2396 | mw32(P0_VSR_DATA + 16, 0x2F0); | ||
2397 | |||
2398 | mw32(P0_VSR_ADDR + 24, 0x8); | ||
2399 | mw32(P0_VSR_DATA + 24, 0x2F0); | ||
2400 | |||
2401 | } | ||
2402 | |||
2403 | static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) | ||
2404 | { | ||
2405 | void __iomem *regs = mvi->regs; | ||
2406 | u32 tmp; | ||
2407 | |||
2408 | tmp = mr32(PCS); | ||
2409 | if (mvi->chip->n_phy <= 4) | ||
2410 | tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); | ||
2411 | else | ||
2412 | tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); | ||
2413 | mw32(PCS, tmp); | ||
2414 | } | ||
2415 | |||
2416 | static void mvs_detect_porttype(struct mvs_info *mvi, int i) | ||
2417 | { | ||
2418 | void __iomem *regs = mvi->regs; | ||
2419 | u32 reg; | ||
2420 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2421 | |||
2422 | /* TODO check & save device type */ | ||
2423 | reg = mr32(GBL_PORT_TYPE); | ||
2424 | |||
2425 | if (reg & MODE_SAS_SATA & (1 << i)) | ||
2426 | phy->phy_type |= PORT_TYPE_SAS; | ||
2427 | else | ||
2428 | phy->phy_type |= PORT_TYPE_SATA; | ||
2429 | } | ||
2430 | |||
2431 | static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) | ||
2432 | { | ||
2433 | u32 *s = (u32 *) buf; | ||
2434 | |||
2435 | if (!s) | ||
2436 | return NULL; | ||
2437 | |||
2438 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); | ||
2439 | s[3] = mvs_read_port_cfg_data(mvi, i); | ||
2440 | |||
2441 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); | ||
2442 | s[2] = mvs_read_port_cfg_data(mvi, i); | ||
2443 | |||
2444 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); | ||
2445 | s[1] = mvs_read_port_cfg_data(mvi, i); | ||
2446 | |||
2447 | mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); | ||
2448 | s[0] = mvs_read_port_cfg_data(mvi, i); | ||
2449 | |||
2450 | return (void *)s; | ||
2451 | } | ||
2452 | |||
2453 | static u32 mvs_is_sig_fis_received(u32 irq_status) | ||
2454 | { | ||
2455 | return irq_status & PHYEV_SIG_FIS; | ||
2456 | } | ||
2457 | |||
2458 | static void mvs_update_wideport(struct mvs_info *mvi, int i) | ||
2459 | { | ||
2460 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2461 | struct mvs_port *port = phy->port; | ||
2462 | int j, no; | ||
2463 | |||
2464 | for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) | ||
2465 | if (no & 1) { | ||
2466 | mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); | ||
2467 | mvs_write_port_cfg_data(mvi, no, | ||
2468 | port->wide_port_phymap); | ||
2469 | } else { | ||
2470 | mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); | ||
2471 | mvs_write_port_cfg_data(mvi, no, 0); | ||
2472 | } | ||
2473 | } | ||
2474 | |||
2475 | static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) | ||
2476 | { | ||
2477 | u32 tmp; | ||
2478 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2479 | struct mvs_port *port; | ||
2480 | |||
2481 | tmp = mvs_read_phy_ctl(mvi, i); | ||
2482 | |||
2483 | if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { | ||
2484 | if (!phy->port) | ||
2485 | phy->phy_attached = 1; | ||
2486 | return tmp; | ||
2487 | } | ||
2488 | |||
2489 | port = phy->port; | ||
2490 | if (port) { | ||
2491 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
2492 | port->wide_port_phymap &= ~(1U << i); | ||
2493 | if (!port->wide_port_phymap) | ||
2494 | port->port_attached = 0; | ||
2495 | mvs_update_wideport(mvi, i); | ||
2496 | } else if (phy->phy_type & PORT_TYPE_SATA) | ||
2497 | port->port_attached = 0; | ||
2498 | mvs_free_reg_set(mvi, phy->port); | ||
2499 | phy->port = NULL; | ||
2500 | phy->phy_attached = 0; | ||
2501 | phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); | ||
2502 | } | ||
2503 | return 0; | ||
2504 | } | ||
2505 | |||
2506 | static void mvs_update_phyinfo(struct mvs_info *mvi, int i, | ||
2507 | int get_st) | ||
2508 | { | ||
2509 | struct mvs_phy *phy = &mvi->phy[i]; | ||
2510 | struct pci_dev *pdev = mvi->pdev; | ||
2511 | u32 tmp, j; | ||
2512 | u64 tmp64; | ||
2513 | |||
2514 | mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); | ||
2515 | phy->dev_info = mvs_read_port_cfg_data(mvi, i); | ||
2516 | |||
2517 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); | ||
2518 | phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; | ||
2519 | |||
2520 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); | ||
2521 | phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); | ||
2522 | |||
2523 | if (get_st) { | ||
2524 | phy->irq_status = mvs_read_port_irq_stat(mvi, i); | ||
2525 | phy->phy_status = mvs_is_phy_ready(mvi, i); | ||
2526 | } | ||
2527 | |||
2528 | if (phy->phy_status) { | ||
2529 | u32 phy_st; | ||
2530 | struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; | ||
2531 | |||
2532 | mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); | ||
2533 | phy_st = mvs_read_port_cfg_data(mvi, i); | ||
2534 | |||
2535 | sas_phy->linkrate = | ||
2536 | (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> | ||
2537 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; | ||
2538 | |||
2539 | /* Update attached_sas_addr */ | ||
2540 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); | ||
2541 | phy->att_dev_sas_addr = | ||
2542 | (u64) mvs_read_port_cfg_data(mvi, i) << 32; | ||
2543 | |||
2544 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); | ||
2545 | phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); | ||
2546 | |||
2547 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
2548 | "phy[%d] Get Attached Address 0x%llX," | ||
2549 | " SAS Address 0x%llX\n", | ||
2550 | i, phy->att_dev_sas_addr, phy->dev_sas_addr); | ||
2551 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
2552 | "Rate = %x, type = %d\n", | ||
2553 | sas_phy->linkrate, phy->phy_type); | ||
2554 | |||
2555 | #if 1 | ||
2556 | /* | ||
2557 | * If the device is capable of supporting a wide port | ||
2558 | * on its phys, it may configure the phys as a wide port. | ||
2559 | */ | ||
2560 | if (phy->phy_type & PORT_TYPE_SAS) | ||
2561 | for (j = 0; j < mvi->chip->n_phy && j != i; ++j) { | ||
2562 | if ((mvi->phy[j].phy_attached) && | ||
2563 | (mvi->phy[j].phy_type & PORT_TYPE_SAS)) | ||
2564 | if (phy->att_dev_sas_addr == | ||
2565 | mvi->phy[j].att_dev_sas_addr - 1) { | ||
2566 | phy->att_dev_sas_addr = | ||
2567 | mvi->phy[j].att_dev_sas_addr; | ||
2568 | break; | ||
2569 | } | ||
2570 | } | ||
2571 | |||
2572 | #endif | ||
2573 | |||
2574 | tmp64 = cpu_to_be64(phy->att_dev_sas_addr); | ||
2575 | memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); | ||
2576 | |||
2577 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
2578 | mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); | ||
2579 | phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); | ||
2580 | phy->identify.device_type = | ||
2581 | phy->att_dev_info & PORT_DEV_TYPE_MASK; | ||
2582 | |||
2583 | if (phy->identify.device_type == SAS_END_DEV) | ||
2584 | phy->identify.target_port_protocols = | ||
2585 | SAS_PROTOCOL_SSP; | ||
2586 | else if (phy->identify.device_type != NO_DEVICE) | ||
2587 | phy->identify.target_port_protocols = | ||
2588 | SAS_PROTOCOL_SMP; | ||
2589 | if (phy_st & PHY_OOB_DTCTD) | ||
2590 | sas_phy->oob_mode = SAS_OOB_MODE; | ||
2591 | phy->frame_rcvd_size = | ||
2592 | sizeof(struct sas_identify_frame); | ||
2593 | } else if (phy->phy_type & PORT_TYPE_SATA) { | ||
2594 | phy->identify.target_port_protocols = SAS_PROTOCOL_STP; | ||
2595 | if (mvs_is_sig_fis_received(phy->irq_status)) { | ||
2596 | if (phy_st & PHY_OOB_DTCTD) | ||
2597 | sas_phy->oob_mode = SATA_OOB_MODE; | ||
2598 | phy->frame_rcvd_size = | ||
2599 | sizeof(struct dev_to_host_fis); | ||
2600 | mvs_get_d2h_reg(mvi, i, | ||
2601 | (void *)sas_phy->frame_rcvd); | ||
2602 | } else { | ||
2603 | dev_printk(KERN_DEBUG, &pdev->dev, | ||
2604 | "No sig fis\n"); | ||
2605 | } | ||
2606 | } | ||
2607 | /* workaround for HW phy decoding error on 1.5g disk drive */ | ||
2608 | mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); | ||
2609 | tmp = mvs_read_port_vsr_data(mvi, i); | ||
2610 | if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> | ||
2611 | PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == | ||
2612 | SAS_LINK_RATE_1_5_GBPS) | ||
2613 | tmp &= ~PHY_MODE6_DTL_SPEED; | ||
2614 | else | ||
2615 | tmp |= PHY_MODE6_DTL_SPEED; | ||
2616 | mvs_write_port_vsr_data(mvi, i, tmp); | ||
2617 | |||
2618 | } | ||
2619 | if (get_st) | ||
2620 | mvs_write_port_irq_stat(mvi, i, phy->irq_status); | ||
2621 | } | ||
2622 | |||
2623 | static void mvs_port_formed(struct asd_sas_phy *sas_phy) | ||
2624 | { | ||
2625 | struct sas_ha_struct *sas_ha = sas_phy->ha; | ||
2626 | struct mvs_info *mvi = sas_ha->lldd_ha; | ||
2627 | struct asd_sas_port *sas_port = sas_phy->port; | ||
2628 | struct mvs_phy *phy = sas_phy->lldd_phy; | ||
2629 | struct mvs_port *port = &mvi->port[sas_port->id]; | ||
2630 | unsigned long flags; | ||
2631 | |||
2632 | spin_lock_irqsave(&mvi->lock, flags); | ||
2633 | port->port_attached = 1; | ||
2634 | phy->port = port; | ||
2635 | port->taskfileset = MVS_ID_NOT_MAPPED; | ||
2636 | if (phy->phy_type & PORT_TYPE_SAS) { | ||
2637 | port->wide_port_phymap = sas_port->phy_mask; | ||
2638 | mvs_update_wideport(mvi, sas_phy->id); | ||
2639 | } | ||
2640 | spin_unlock_irqrestore(&mvi->lock, flags); | ||
2641 | } | ||
2642 | |||
2643 | static int __devinit mvs_hw_init(struct mvs_info *mvi) | ||
2644 | { | ||
2645 | void __iomem *regs = mvi->regs; | ||
2646 | int i; | ||
2647 | u32 tmp, cctl; | ||
2648 | |||
2649 | /* make sure interrupts are masked immediately (paranoia) */ | ||
2650 | mw32(GBL_CTL, 0); | ||
2651 | tmp = mr32(GBL_CTL); | ||
2652 | |||
2653 | /* Reset Controller */ | ||
2654 | if (!(tmp & HBA_RST)) { | ||
2655 | if (mvi->flags & MVF_PHY_PWR_FIX) { | ||
2656 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); | ||
2657 | tmp &= ~PCTL_PWR_ON; | ||
2658 | tmp |= PCTL_OFF; | ||
2659 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); | ||
2660 | |||
2661 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); | ||
2662 | tmp &= ~PCTL_PWR_ON; | ||
2663 | tmp |= PCTL_OFF; | ||
2664 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); | ||
2665 | } | ||
2666 | |||
2667 | /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ | ||
2668 | mw32_f(GBL_CTL, HBA_RST); | ||
2669 | } | ||
2670 | |||
2671 | /* wait for reset to finish; timeout is just a guess */ | ||
2672 | i = 1000; | ||
2673 | while (i-- > 0) { | ||
2674 | msleep(10); | ||
2675 | |||
2676 | if (!(mr32(GBL_CTL) & HBA_RST)) | ||
2677 | break; | ||
2678 | } | ||
2679 | if (mr32(GBL_CTL) & HBA_RST) { | ||
2680 | dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); | ||
2681 | return -EBUSY; | ||
2682 | } | ||
2683 | |||
2684 | /* Init Chip */ | ||
2685 | /* make sure RST is set; HBA_RST /should/ have done that for us */ | ||
2686 | cctl = mr32(CTL); | ||
2687 | if (cctl & CCTL_RST) | ||
2688 | cctl &= ~CCTL_RST; | ||
2689 | else | ||
2690 | mw32_f(CTL, cctl | CCTL_RST); | ||
2691 | |||
2692 | /* write to device control _AND_ device status register? - A.C. */ | ||
2693 | pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); | ||
2694 | tmp &= ~PRD_REQ_MASK; | ||
2695 | tmp |= PRD_REQ_SIZE; | ||
2696 | pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); | ||
2697 | |||
2698 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); | ||
2699 | tmp |= PCTL_PWR_ON; | ||
2700 | tmp &= ~PCTL_OFF; | ||
2701 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); | ||
2702 | |||
2703 | pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); | ||
2704 | tmp |= PCTL_PWR_ON; | ||
2705 | tmp &= ~PCTL_OFF; | ||
2706 | pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); | ||
2707 | |||
2708 | mw32_f(CTL, cctl); | ||
2709 | |||
2710 | /* reset control */ | ||
2711 | mw32(PCS, 0); /*MVS_PCS */ | ||
2712 | |||
2713 | mvs_phy_hacks(mvi); | ||
2714 | |||
2715 | mw32(CMD_LIST_LO, mvi->slot_dma); | ||
2716 | mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); | ||
2717 | |||
2718 | mw32(RX_FIS_LO, mvi->rx_fis_dma); | ||
2719 | mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); | ||
2720 | |||
2721 | mw32(TX_CFG, MVS_CHIP_SLOT_SZ); | ||
2722 | mw32(TX_LO, mvi->tx_dma); | ||
2723 | mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); | ||
2724 | |||
2725 | mw32(RX_CFG, MVS_RX_RING_SZ); | ||
2726 | mw32(RX_LO, mvi->rx_dma); | ||
2727 | mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); | ||
2728 | |||
2729 | /* enable auto port detection */ | ||
2730 | mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); | ||
2731 | msleep(100); | ||
2732 | /* init and reset phys */ | ||
2733 | for (i = 0; i < mvi->chip->n_phy; i++) { | ||
2734 | /* FIXME: is this the correct dword order? */ | ||
2735 | u32 lo = *((u32 *)&mvi->sas_addr[0]); | ||
2736 | u32 hi = *((u32 *)&mvi->sas_addr[4]); | ||
2737 | |||
2738 | mvs_detect_porttype(mvi, i); | ||
2739 | |||
2740 | /* set phy local SAS address */ | ||
2741 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); | ||
2742 | mvs_write_port_cfg_data(mvi, i, lo); | ||
2743 | mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); | ||
2744 | mvs_write_port_cfg_data(mvi, i, hi); | ||
2745 | |||
2746 | /* reset phy */ | ||
2747 | tmp = mvs_read_phy_ctl(mvi, i); | ||
2748 | tmp |= PHY_RST; | ||
2749 | mvs_write_phy_ctl(mvi, i, tmp); | ||
2750 | } | ||
2751 | |||
2752 | msleep(100); | ||
2753 | |||
2754 | for (i = 0; i < mvi->chip->n_phy; i++) { | ||
2755 | /* clear phy int status */ | ||
2756 | tmp = mvs_read_port_irq_stat(mvi, i); | ||
2757 | tmp &= ~PHYEV_SIG_FIS; | ||
2758 | mvs_write_port_irq_stat(mvi, i, tmp); | ||
2759 | |||
2760 | /* set phy int mask */ | ||
2761 | tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | | ||
2762 | PHYEV_ID_DONE | PHYEV_DEC_ERR; | ||
2763 | mvs_write_port_irq_mask(mvi, i, tmp); | ||
2764 | |||
2765 | msleep(100); | ||
2766 | mvs_update_phyinfo(mvi, i, 1); | ||
2767 | mvs_enable_xmt(mvi, i); | ||
2768 | } | ||
2769 | |||
2770 | /* FIXME: update wide port bitmaps */ | ||
2771 | |||
2772 | /* little endian for open address and command table, etc. */ | ||
2773 | /* A.C. | ||
2774 | * it seems that (from the spec) turning on big-endian won't | ||
2775 | * do us any good on big-endian machines; needs further confirmation | ||
2776 | */ | ||
2777 | cctl = mr32(CTL); | ||
2778 | cctl |= CCTL_ENDIAN_CMD; | ||
2779 | cctl |= CCTL_ENDIAN_DATA; | ||
2780 | cctl &= ~CCTL_ENDIAN_OPEN; | ||
2781 | cctl |= CCTL_ENDIAN_RSP; | ||
2782 | mw32_f(CTL, cctl); | ||
2783 | |||
2784 | /* reset CMD queue */ | ||
2785 | tmp = mr32(PCS); | ||
2786 | tmp |= PCS_CMD_RST; | ||
2787 | mw32(PCS, tmp); | ||
2788 | /* interrupt coalescing may cause a missed HW interrupt in some cases, | ||
2789 | * and since the max coalescing count is 0x1ff while our max slot count | ||
2790 | * is 0x200, it would wrap the count to 0, so leave coalescing disabled. | ||
2791 | */ | ||
2792 | tmp = 0; | ||
2793 | mw32(INT_COAL, tmp); | ||
2794 | |||
2795 | tmp = 0x100; | ||
2796 | mw32(INT_COAL_TMOUT, tmp); | ||
2797 | |||
2798 | /* ladies and gentlemen, start your engines */ | ||
2799 | mw32(TX_CFG, 0); | ||
2800 | mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); | ||
2801 | mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); | ||
2802 | /* enable CMD/CMPL_Q/RESP mode */ | ||
2803 | mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); | ||
2804 | |||
2805 | /* re-enable interrupts globally */ | ||
2806 | mvs_hba_interrupt_enable(mvi); | ||
2807 | |||
2808 | /* enable completion queue interrupt */ | ||
2809 | tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM); | ||
2810 | mw32(INT_MASK, tmp); | ||
2811 | |||
2812 | return 0; | ||
2813 | } | ||
2814 | |||
2815 | static void __devinit mvs_print_info(struct mvs_info *mvi) | ||
2816 | { | ||
2817 | struct pci_dev *pdev = mvi->pdev; | ||
2818 | static int printed_version; | ||
2819 | |||
2820 | if (!printed_version++) | ||
2821 | dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); | ||
2822 | |||
2823 | dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", | ||
2824 | mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); | ||
2825 | } | ||
2826 | |||
2827 | static int __devinit mvs_pci_init(struct pci_dev *pdev, | ||
2828 | const struct pci_device_id *ent) | ||
2829 | { | ||
2830 | int rc; | ||
2831 | struct mvs_info *mvi; | ||
2832 | irq_handler_t irq_handler = mvs_interrupt; | ||
2833 | |||
2834 | rc = pci_enable_device(pdev); | ||
2835 | if (rc) | ||
2836 | return rc; | ||
2837 | |||
2838 | pci_set_master(pdev); | ||
2839 | |||
2840 | rc = pci_request_regions(pdev, DRV_NAME); | ||
2841 | if (rc) | ||
2842 | goto err_out_disable; | ||
2843 | |||
2844 | rc = pci_go_64(pdev); | ||
2845 | if (rc) | ||
2846 | goto err_out_regions; | ||
2847 | |||
2848 | mvi = mvs_alloc(pdev, ent); | ||
2849 | if (!mvi) { | ||
2850 | rc = -ENOMEM; | ||
2851 | goto err_out_regions; | ||
2852 | } | ||
2853 | |||
2854 | rc = mvs_hw_init(mvi); | ||
2855 | if (rc) | ||
2856 | goto err_out_mvi; | ||
2857 | |||
2858 | #ifndef MVS_DISABLE_MSI | ||
2859 | if (!pci_enable_msi(pdev)) { | ||
2860 | u32 tmp; | ||
2861 | void __iomem *regs = mvi->regs; | ||
2862 | mvi->flags |= MVF_MSI; | ||
2863 | irq_handler = mvs_msi_interrupt; | ||
2864 | tmp = mr32(PCS); | ||
2865 | mw32(PCS, tmp | PCS_SELF_CLEAR); | ||
2866 | } | ||
2867 | #endif | ||
2868 | |||
2869 | rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); | ||
2870 | if (rc) | ||
2871 | goto err_out_msi; | ||
2872 | |||
2873 | rc = scsi_add_host(mvi->shost, &pdev->dev); | ||
2874 | if (rc) | ||
2875 | goto err_out_irq; | ||
2876 | |||
2877 | rc = sas_register_ha(&mvi->sas); | ||
2878 | if (rc) | ||
2879 | goto err_out_shost; | ||
2880 | |||
2881 | pci_set_drvdata(pdev, mvi); | ||
2882 | |||
2883 | mvs_print_info(mvi); | ||
2884 | |||
2885 | scsi_scan_host(mvi->shost); | ||
2886 | |||
2887 | return 0; | ||
2888 | |||
2889 | err_out_shost: | ||
2890 | scsi_remove_host(mvi->shost); | ||
2891 | err_out_irq: | ||
2892 | free_irq(pdev->irq, mvi); | ||
2893 | err_out_msi: | ||
2894 | if (mvi->flags & MVF_MSI) | ||
2895 | pci_disable_msi(pdev); | ||
2896 | err_out_mvi: | ||
2897 | mvs_free(mvi); | ||
2898 | err_out_regions: | ||
2899 | pci_release_regions(pdev); | ||
2900 | err_out_disable: | ||
2901 | pci_disable_device(pdev); | ||
2902 | return rc; | ||
2903 | } | ||
2904 | |||
2905 | static void __devexit mvs_pci_remove(struct pci_dev *pdev) | ||
2906 | { | ||
2907 | struct mvs_info *mvi = pci_get_drvdata(pdev); | ||
2908 | |||
2909 | pci_set_drvdata(pdev, NULL); | ||
2910 | |||
2911 | if (mvi) { | ||
2912 | sas_unregister_ha(&mvi->sas); | ||
2913 | mvs_hba_interrupt_disable(mvi); | ||
2914 | sas_remove_host(mvi->shost); | ||
2915 | scsi_remove_host(mvi->shost); | ||
2916 | |||
2917 | free_irq(pdev->irq, mvi); | ||
2918 | if (mvi->flags & MVF_MSI) | ||
2919 | pci_disable_msi(pdev); | ||
2920 | mvs_free(mvi); | ||
2921 | pci_release_regions(pdev); | ||
2922 | } | ||
2923 | pci_disable_device(pdev); | ||
2924 | } | ||
2925 | |||
2926 | static struct sas_domain_function_template mvs_transport_ops = { | ||
2927 | .lldd_execute_task = mvs_task_exec, | ||
2928 | .lldd_control_phy = mvs_phy_control, | ||
2929 | .lldd_abort_task = mvs_task_abort, | ||
2930 | .lldd_port_formed = mvs_port_formed | ||
2931 | }; | ||
2932 | |||
2933 | static struct pci_device_id __devinitdata mvs_pci_table[] = { | ||
2934 | { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, | ||
2935 | { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, | ||
2936 | { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, | ||
2937 | { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, | ||
2938 | |||
2939 | { } /* terminate list */ | ||
2940 | }; | ||
2941 | |||
2942 | static struct pci_driver mvs_pci_driver = { | ||
2943 | .name = DRV_NAME, | ||
2944 | .id_table = mvs_pci_table, | ||
2945 | .probe = mvs_pci_init, | ||
2946 | .remove = __devexit_p(mvs_pci_remove), | ||
2947 | }; | ||
2948 | |||
2949 | static int __init mvs_init(void) | ||
2950 | { | ||
2951 | int rc; | ||
2952 | |||
2953 | mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); | ||
2954 | if (!mvs_stt) | ||
2955 | return -ENOMEM; | ||
2956 | |||
2957 | rc = pci_register_driver(&mvs_pci_driver); | ||
2958 | if (rc) | ||
2959 | goto err_out; | ||
2960 | |||
2961 | return 0; | ||
2962 | |||
2963 | err_out: | ||
2964 | sas_release_transport(mvs_stt); | ||
2965 | return rc; | ||
2966 | } | ||
2967 | |||
2968 | static void __exit mvs_exit(void) | ||
2969 | { | ||
2970 | pci_unregister_driver(&mvs_pci_driver); | ||
2971 | sas_release_transport(mvs_stt); | ||
2972 | } | ||
2973 | |||
2974 | module_init(mvs_init); | ||
2975 | module_exit(mvs_exit); | ||
2976 | |||
2977 | MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); | ||
2978 | MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); | ||
2979 | MODULE_VERSION(DRV_VERSION); | ||
2980 | MODULE_LICENSE("GPL"); | ||
2981 | MODULE_DEVICE_TABLE(pci, mvs_pci_table); | ||
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 1479c60441c8..2cd899bfe84b 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -23,7 +23,7 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) | |||
23 | mutex_lock(&ha->fce_mutex); | 23 | mutex_lock(&ha->fce_mutex); |
24 | 24 | ||
25 | seq_printf(s, "FCE Trace Buffer\n"); | 25 | seq_printf(s, "FCE Trace Buffer\n"); |
26 | seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr); | 26 | seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); |
27 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); | 27 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); |
28 | seq_printf(s, "FCE Enable Registers\n"); | 28 | seq_printf(s, "FCE Enable Registers\n"); |
29 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", | 29 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 0f029d0d7315..fc84db4069f4 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -100,8 +100,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, | |||
100 | 100 | ||
101 | if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { | 101 | if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { |
102 | scsi_set_resid(cmd, residual); | 102 | scsi_set_resid(cmd, residual); |
103 | if (!scsi_status && ((scsi_bufflen(cmd) - residual) < | 103 | if ((scsi_bufflen(cmd) - residual) < cmd->underflow) { |
104 | cmd->underflow)) { | ||
105 | 104 | ||
106 | cmd->result = DID_ERROR << 16; | 105 | cmd->result = DID_ERROR << 16; |
107 | 106 | ||
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 65455ab1f3b9..4a1cf6377f6c 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -651,7 +651,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti) | |||
651 | 651 | ||
652 | static irqreturn_t qpti_intr(int irq, void *dev_id); | 652 | static irqreturn_t qpti_intr(int irq, void *dev_id); |
653 | 653 | ||
654 | static void __init qpti_chain_add(struct qlogicpti *qpti) | 654 | static void __devinit qpti_chain_add(struct qlogicpti *qpti) |
655 | { | 655 | { |
656 | spin_lock_irq(&qptichain_lock); | 656 | spin_lock_irq(&qptichain_lock); |
657 | if (qptichain != NULL) { | 657 | if (qptichain != NULL) { |
@@ -667,7 +667,7 @@ static void __init qpti_chain_add(struct qlogicpti *qpti) | |||
667 | spin_unlock_irq(&qptichain_lock); | 667 | spin_unlock_irq(&qptichain_lock); |
668 | } | 668 | } |
669 | 669 | ||
670 | static void __init qpti_chain_del(struct qlogicpti *qpti) | 670 | static void __devexit qpti_chain_del(struct qlogicpti *qpti) |
671 | { | 671 | { |
672 | spin_lock_irq(&qptichain_lock); | 672 | spin_lock_irq(&qptichain_lock); |
673 | if (qptichain == qpti) { | 673 | if (qptichain == qpti) { |
@@ -682,7 +682,7 @@ static void __init qpti_chain_del(struct qlogicpti *qpti) | |||
682 | spin_unlock_irq(&qptichain_lock); | 682 | spin_unlock_irq(&qptichain_lock); |
683 | } | 683 | } |
684 | 684 | ||
685 | static int __init qpti_map_regs(struct qlogicpti *qpti) | 685 | static int __devinit qpti_map_regs(struct qlogicpti *qpti) |
686 | { | 686 | { |
687 | struct sbus_dev *sdev = qpti->sdev; | 687 | struct sbus_dev *sdev = qpti->sdev; |
688 | 688 | ||
@@ -705,7 +705,7 @@ static int __init qpti_map_regs(struct qlogicpti *qpti) | |||
705 | return 0; | 705 | return 0; |
706 | } | 706 | } |
707 | 707 | ||
708 | static int __init qpti_register_irq(struct qlogicpti *qpti) | 708 | static int __devinit qpti_register_irq(struct qlogicpti *qpti) |
709 | { | 709 | { |
710 | struct sbus_dev *sdev = qpti->sdev; | 710 | struct sbus_dev *sdev = qpti->sdev; |
711 | 711 | ||
@@ -730,7 +730,7 @@ fail: | |||
730 | return -1; | 730 | return -1; |
731 | } | 731 | } |
732 | 732 | ||
733 | static void __init qpti_get_scsi_id(struct qlogicpti *qpti) | 733 | static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti) |
734 | { | 734 | { |
735 | qpti->scsi_id = prom_getintdefault(qpti->prom_node, | 735 | qpti->scsi_id = prom_getintdefault(qpti->prom_node, |
736 | "initiator-id", | 736 | "initiator-id", |
@@ -783,7 +783,7 @@ static void qpti_get_clock(struct qlogicpti *qpti) | |||
783 | /* The request and response queues must each be aligned | 783 | /* The request and response queues must each be aligned |
784 | * on a page boundary. | 784 | * on a page boundary. |
785 | */ | 785 | */ |
786 | static int __init qpti_map_queues(struct qlogicpti *qpti) | 786 | static int __devinit qpti_map_queues(struct qlogicpti *qpti) |
787 | { | 787 | { |
788 | struct sbus_dev *sdev = qpti->sdev; | 788 | struct sbus_dev *sdev = qpti->sdev; |
789 | 789 | ||
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1541c174937a..d1777a9a9625 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -222,7 +222,7 @@ static struct scsi_host_template sdebug_driver_template = { | |||
222 | .cmd_per_lun = 16, | 222 | .cmd_per_lun = 16, |
223 | .max_sectors = 0xffff, | 223 | .max_sectors = 0xffff, |
224 | .unchecked_isa_dma = 0, | 224 | .unchecked_isa_dma = 0, |
225 | .use_clustering = ENABLE_CLUSTERING, | 225 | .use_clustering = DISABLE_CLUSTERING, |
226 | .module = THIS_MODULE, | 226 | .module = THIS_MODULE, |
227 | }; | 227 | }; |
228 | 228 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index fac7534f3ec4..9981682d5302 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -231,7 +231,7 @@ static struct { | |||
231 | { ISCSI_SESSION_FREE, "FREE" }, | 231 | { ISCSI_SESSION_FREE, "FREE" }, |
232 | }; | 232 | }; |
233 | 233 | ||
234 | const char *iscsi_session_state_name(int state) | 234 | static const char *iscsi_session_state_name(int state) |
235 | { | 235 | { |
236 | int i; | 236 | int i; |
237 | char *name = NULL; | 237 | char *name = NULL; |
@@ -373,7 +373,7 @@ static void session_recovery_timedout(struct work_struct *work) | |||
373 | scsi_target_unblock(&session->dev); | 373 | scsi_target_unblock(&session->dev); |
374 | } | 374 | } |
375 | 375 | ||
376 | void __iscsi_unblock_session(struct iscsi_cls_session *session) | 376 | static void __iscsi_unblock_session(struct iscsi_cls_session *session) |
377 | { | 377 | { |
378 | if (!cancel_delayed_work(&session->recovery_work)) | 378 | if (!cancel_delayed_work(&session->recovery_work)) |
379 | flush_workqueue(iscsi_eh_timer_workq); | 379 | flush_workqueue(iscsi_eh_timer_workq); |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index a57fed47b39d..a6d96694d0a5 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -33,9 +33,9 @@ | |||
33 | #include <scsi/scsi_host.h> | 33 | #include <scsi/scsi_host.h> |
34 | 34 | ||
35 | struct ses_device { | 35 | struct ses_device { |
36 | char *page1; | 36 | unsigned char *page1; |
37 | char *page2; | 37 | unsigned char *page2; |
38 | char *page10; | 38 | unsigned char *page10; |
39 | short page1_len; | 39 | short page1_len; |
40 | short page2_len; | 40 | short page2_len; |
41 | short page10_len; | 41 | short page10_len; |
@@ -67,7 +67,7 @@ static int ses_probe(struct device *dev) | |||
67 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, | 67 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, |
68 | void *buf, int bufflen) | 68 | void *buf, int bufflen) |
69 | { | 69 | { |
70 | char cmd[] = { | 70 | unsigned char cmd[] = { |
71 | RECEIVE_DIAGNOSTIC, | 71 | RECEIVE_DIAGNOSTIC, |
72 | 1, /* Set PCV bit */ | 72 | 1, /* Set PCV bit */ |
73 | page_code, | 73 | page_code, |
@@ -85,7 +85,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code, | |||
85 | { | 85 | { |
86 | u32 result; | 86 | u32 result; |
87 | 87 | ||
88 | char cmd[] = { | 88 | unsigned char cmd[] = { |
89 | SEND_DIAGNOSTIC, | 89 | SEND_DIAGNOSTIC, |
90 | 0x10, /* Set PF bit */ | 90 | 0x10, /* Set PF bit */ |
91 | 0, | 91 | 0, |
@@ -104,13 +104,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code, | |||
104 | 104 | ||
105 | static int ses_set_page2_descriptor(struct enclosure_device *edev, | 105 | static int ses_set_page2_descriptor(struct enclosure_device *edev, |
106 | struct enclosure_component *ecomp, | 106 | struct enclosure_component *ecomp, |
107 | char *desc) | 107 | unsigned char *desc) |
108 | { | 108 | { |
109 | int i, j, count = 0, descriptor = ecomp->number; | 109 | int i, j, count = 0, descriptor = ecomp->number; |
110 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); | 110 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); |
111 | struct ses_device *ses_dev = edev->scratch; | 111 | struct ses_device *ses_dev = edev->scratch; |
112 | char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 112 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; |
113 | char *desc_ptr = ses_dev->page2 + 8; | 113 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
114 | 114 | ||
115 | /* Clear everything */ | 115 | /* Clear everything */ |
116 | memset(desc_ptr, 0, ses_dev->page2_len - 8); | 116 | memset(desc_ptr, 0, ses_dev->page2_len - 8); |
@@ -133,14 +133,14 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev, | |||
133 | return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 133 | return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
134 | } | 134 | } |
135 | 135 | ||
136 | static char *ses_get_page2_descriptor(struct enclosure_device *edev, | 136 | static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev, |
137 | struct enclosure_component *ecomp) | 137 | struct enclosure_component *ecomp) |
138 | { | 138 | { |
139 | int i, j, count = 0, descriptor = ecomp->number; | 139 | int i, j, count = 0, descriptor = ecomp->number; |
140 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); | 140 | struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); |
141 | struct ses_device *ses_dev = edev->scratch; | 141 | struct ses_device *ses_dev = edev->scratch; |
142 | char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 142 | unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; |
143 | char *desc_ptr = ses_dev->page2 + 8; | 143 | unsigned char *desc_ptr = ses_dev->page2 + 8; |
144 | 144 | ||
145 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); | 145 | ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); |
146 | 146 | ||
@@ -160,17 +160,18 @@ static char *ses_get_page2_descriptor(struct enclosure_device *edev, | |||
160 | static void ses_get_fault(struct enclosure_device *edev, | 160 | static void ses_get_fault(struct enclosure_device *edev, |
161 | struct enclosure_component *ecomp) | 161 | struct enclosure_component *ecomp) |
162 | { | 162 | { |
163 | char *desc; | 163 | unsigned char *desc; |
164 | 164 | ||
165 | desc = ses_get_page2_descriptor(edev, ecomp); | 165 | desc = ses_get_page2_descriptor(edev, ecomp); |
166 | ecomp->fault = (desc[3] & 0x60) >> 4; | 166 | if (desc) |
167 | ecomp->fault = (desc[3] & 0x60) >> 4; | ||
167 | } | 168 | } |
168 | 169 | ||
169 | static int ses_set_fault(struct enclosure_device *edev, | 170 | static int ses_set_fault(struct enclosure_device *edev, |
170 | struct enclosure_component *ecomp, | 171 | struct enclosure_component *ecomp, |
171 | enum enclosure_component_setting val) | 172 | enum enclosure_component_setting val) |
172 | { | 173 | { |
173 | char desc[4] = {0 }; | 174 | unsigned char desc[4] = {0 }; |
174 | 175 | ||
175 | switch (val) { | 176 | switch (val) { |
176 | case ENCLOSURE_SETTING_DISABLED: | 177 | case ENCLOSURE_SETTING_DISABLED: |
@@ -190,26 +191,28 @@ static int ses_set_fault(struct enclosure_device *edev, | |||
190 | static void ses_get_status(struct enclosure_device *edev, | 191 | static void ses_get_status(struct enclosure_device *edev, |
191 | struct enclosure_component *ecomp) | 192 | struct enclosure_component *ecomp) |
192 | { | 193 | { |
193 | char *desc; | 194 | unsigned char *desc; |
194 | 195 | ||
195 | desc = ses_get_page2_descriptor(edev, ecomp); | 196 | desc = ses_get_page2_descriptor(edev, ecomp); |
196 | ecomp->status = (desc[0] & 0x0f); | 197 | if (desc) |
198 | ecomp->status = (desc[0] & 0x0f); | ||
197 | } | 199 | } |
198 | 200 | ||
199 | static void ses_get_locate(struct enclosure_device *edev, | 201 | static void ses_get_locate(struct enclosure_device *edev, |
200 | struct enclosure_component *ecomp) | 202 | struct enclosure_component *ecomp) |
201 | { | 203 | { |
202 | char *desc; | 204 | unsigned char *desc; |
203 | 205 | ||
204 | desc = ses_get_page2_descriptor(edev, ecomp); | 206 | desc = ses_get_page2_descriptor(edev, ecomp); |
205 | ecomp->locate = (desc[2] & 0x02) ? 1 : 0; | 207 | if (desc) |
208 | ecomp->locate = (desc[2] & 0x02) ? 1 : 0; | ||
206 | } | 209 | } |
207 | 210 | ||
208 | static int ses_set_locate(struct enclosure_device *edev, | 211 | static int ses_set_locate(struct enclosure_device *edev, |
209 | struct enclosure_component *ecomp, | 212 | struct enclosure_component *ecomp, |
210 | enum enclosure_component_setting val) | 213 | enum enclosure_component_setting val) |
211 | { | 214 | { |
212 | char desc[4] = {0 }; | 215 | unsigned char desc[4] = {0 }; |
213 | 216 | ||
214 | switch (val) { | 217 | switch (val) { |
215 | case ENCLOSURE_SETTING_DISABLED: | 218 | case ENCLOSURE_SETTING_DISABLED: |
@@ -229,7 +232,7 @@ static int ses_set_active(struct enclosure_device *edev, | |||
229 | struct enclosure_component *ecomp, | 232 | struct enclosure_component *ecomp, |
230 | enum enclosure_component_setting val) | 233 | enum enclosure_component_setting val) |
231 | { | 234 | { |
232 | char desc[4] = {0 }; | 235 | unsigned char desc[4] = {0 }; |
233 | 236 | ||
234 | switch (val) { | 237 | switch (val) { |
235 | case ENCLOSURE_SETTING_DISABLED: | 238 | case ENCLOSURE_SETTING_DISABLED: |
@@ -409,11 +412,11 @@ static int ses_intf_add(struct class_device *cdev, | |||
409 | { | 412 | { |
410 | struct scsi_device *sdev = to_scsi_device(cdev->dev); | 413 | struct scsi_device *sdev = to_scsi_device(cdev->dev); |
411 | struct scsi_device *tmp_sdev; | 414 | struct scsi_device *tmp_sdev; |
412 | unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr, | 415 | unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL, |
413 | *addl_desc_ptr; | 416 | *addl_desc_ptr = NULL; |
414 | struct ses_device *ses_dev; | 417 | struct ses_device *ses_dev; |
415 | u32 result; | 418 | u32 result; |
416 | int i, j, types, len, components = 0; | 419 | int i, j, types, len, page7_len = 0, components = 0; |
417 | int err = -ENOMEM; | 420 | int err = -ENOMEM; |
418 | struct enclosure_device *edev; | 421 | struct enclosure_device *edev; |
419 | struct ses_component *scomp = NULL; | 422 | struct ses_component *scomp = NULL; |
@@ -447,7 +450,7 @@ static int ses_intf_add(struct class_device *cdev, | |||
447 | * traversal routines more complex */ | 450 | * traversal routines more complex */ |
448 | sdev_printk(KERN_ERR, sdev, | 451 | sdev_printk(KERN_ERR, sdev, |
449 | "FIXME driver has no support for subenclosures (%d)\n", | 452 | "FIXME driver has no support for subenclosures (%d)\n", |
450 | buf[1]); | 453 | hdr_buf[1]); |
451 | goto err_free; | 454 | goto err_free; |
452 | } | 455 | } |
453 | 456 | ||
@@ -461,9 +464,8 @@ static int ses_intf_add(struct class_device *cdev, | |||
461 | goto recv_failed; | 464 | goto recv_failed; |
462 | 465 | ||
463 | types = buf[10]; | 466 | types = buf[10]; |
464 | len = buf[11]; | ||
465 | 467 | ||
466 | type_ptr = buf + 12 + len; | 468 | type_ptr = buf + 12 + buf[11]; |
467 | 469 | ||
468 | for (i = 0; i < types; i++, type_ptr += 4) { | 470 | for (i = 0; i < types; i++, type_ptr += 4) { |
469 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || | 471 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
@@ -494,22 +496,21 @@ static int ses_intf_add(struct class_device *cdev, | |||
494 | /* The additional information page --- allows us | 496 | /* The additional information page --- allows us |
495 | * to match up the devices */ | 497 | * to match up the devices */ |
496 | result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); | 498 | result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); |
497 | if (result) | 499 | if (!result) { |
498 | goto no_page10; | 500 | |
499 | 501 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | |
500 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 502 | buf = kzalloc(len, GFP_KERNEL); |
501 | buf = kzalloc(len, GFP_KERNEL); | 503 | if (!buf) |
502 | if (!buf) | 504 | goto err_free; |
503 | goto err_free; | 505 | |
504 | 506 | result = ses_recv_diag(sdev, 10, buf, len); | |
505 | result = ses_recv_diag(sdev, 10, buf, len); | 507 | if (result) |
506 | if (result) | 508 | goto recv_failed; |
507 | goto recv_failed; | 509 | ses_dev->page10 = buf; |
508 | ses_dev->page10 = buf; | 510 | ses_dev->page10_len = len; |
509 | ses_dev->page10_len = len; | 511 | buf = NULL; |
510 | buf = NULL; | 512 | } |
511 | 513 | ||
512 | no_page10: | ||
513 | scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); | 514 | scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); |
514 | if (!scomp) | 515 | if (!scomp) |
515 | goto err_free; | 516 | goto err_free; |
@@ -530,7 +531,7 @@ static int ses_intf_add(struct class_device *cdev, | |||
530 | if (result) | 531 | if (result) |
531 | goto simple_populate; | 532 | goto simple_populate; |
532 | 533 | ||
533 | len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; | 534 | page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; |
534 | /* add 1 for trailing '\0' we'll use */ | 535 | /* add 1 for trailing '\0' we'll use */ |
535 | buf = kzalloc(len + 1, GFP_KERNEL); | 536 | buf = kzalloc(len + 1, GFP_KERNEL); |
536 | if (!buf) | 537 | if (!buf) |
@@ -547,7 +548,8 @@ static int ses_intf_add(struct class_device *cdev, | |||
547 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 548 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
548 | /* skip past overall descriptor */ | 549 | /* skip past overall descriptor */ |
549 | desc_ptr += len + 4; | 550 | desc_ptr += len + 4; |
550 | addl_desc_ptr = ses_dev->page10 + 8; | 551 | if (ses_dev->page10) |
552 | addl_desc_ptr = ses_dev->page10 + 8; | ||
551 | } | 553 | } |
552 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; | 554 | type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; |
553 | components = 0; | 555 | components = 0; |
@@ -557,29 +559,35 @@ static int ses_intf_add(struct class_device *cdev, | |||
557 | struct enclosure_component *ecomp; | 559 | struct enclosure_component *ecomp; |
558 | 560 | ||
559 | if (desc_ptr) { | 561 | if (desc_ptr) { |
560 | len = (desc_ptr[2] << 8) + desc_ptr[3]; | 562 | if (desc_ptr >= buf + page7_len) { |
561 | desc_ptr += 4; | 563 | desc_ptr = NULL; |
562 | /* Add trailing zero - pushes into | 564 | } else { |
563 | * reserved space */ | 565 | len = (desc_ptr[2] << 8) + desc_ptr[3]; |
564 | desc_ptr[len] = '\0'; | 566 | desc_ptr += 4; |
565 | name = desc_ptr; | 567 | /* Add trailing zero - pushes into |
568 | * reserved space */ | ||
569 | desc_ptr[len] = '\0'; | ||
570 | name = desc_ptr; | ||
571 | } | ||
566 | } | 572 | } |
567 | if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && | 573 | if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || |
568 | type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) | 574 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { |
569 | continue; | 575 | |
570 | ecomp = enclosure_component_register(edev, | 576 | ecomp = enclosure_component_register(edev, |
571 | components++, | 577 | components++, |
572 | type_ptr[0], | 578 | type_ptr[0], |
573 | name); | 579 | name); |
574 | if (desc_ptr) { | 580 | |
575 | desc_ptr += len; | 581 | if (!IS_ERR(ecomp) && addl_desc_ptr) |
576 | if (!IS_ERR(ecomp)) | ||
577 | ses_process_descriptor(ecomp, | 582 | ses_process_descriptor(ecomp, |
578 | addl_desc_ptr); | 583 | addl_desc_ptr); |
579 | |||
580 | if (addl_desc_ptr) | ||
581 | addl_desc_ptr += addl_desc_ptr[1] + 2; | ||
582 | } | 584 | } |
585 | if (desc_ptr) | ||
586 | desc_ptr += len; | ||
587 | |||
588 | if (addl_desc_ptr) | ||
589 | addl_desc_ptr += addl_desc_ptr[1] + 2; | ||
590 | |||
583 | } | 591 | } |
584 | } | 592 | } |
585 | kfree(buf); | 593 | kfree(buf); |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 71952703125a..0a52d9d2da2c 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -17,7 +17,7 @@ | |||
17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support | 17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support |
18 | */ | 18 | */ |
19 | 19 | ||
20 | static const char *verstr = "20080117"; | 20 | static const char *verstr = "20080221"; |
21 | 21 | ||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
@@ -1172,7 +1172,7 @@ static int st_open(struct inode *inode, struct file *filp) | |||
1172 | STp->try_dio_now = STp->try_dio; | 1172 | STp->try_dio_now = STp->try_dio; |
1173 | STp->recover_count = 0; | 1173 | STp->recover_count = 0; |
1174 | DEB( STp->nbr_waits = STp->nbr_finished = 0; | 1174 | DEB( STp->nbr_waits = STp->nbr_finished = 0; |
1175 | STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; ) | 1175 | STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; ) |
1176 | 1176 | ||
1177 | retval = check_tape(STp, filp); | 1177 | retval = check_tape(STp, filp); |
1178 | if (retval < 0) | 1178 | if (retval < 0) |
@@ -1226,8 +1226,8 @@ static int st_flush(struct file *filp, fl_owner_t id) | |||
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | DEBC( if (STp->nbr_requests) | 1228 | DEBC( if (STp->nbr_requests) |
1229 | printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", | 1229 | printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n", |
1230 | name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); | 1230 | name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages)); |
1231 | 1231 | ||
1232 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { | 1232 | if (STps->rw == ST_WRITING && !STp->pos_unknown) { |
1233 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; | 1233 | struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; |
@@ -1422,9 +1422,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf, | |||
1422 | if (STbp->do_dio) { | 1422 | if (STbp->do_dio) { |
1423 | STp->nbr_dio++; | 1423 | STp->nbr_dio++; |
1424 | STp->nbr_pages += STbp->do_dio; | 1424 | STp->nbr_pages += STbp->do_dio; |
1425 | for (i=1; i < STbp->do_dio; i++) | ||
1426 | if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1) | ||
1427 | STp->nbr_combinable++; | ||
1428 | } | 1425 | } |
1429 | ) | 1426 | ) |
1430 | } else | 1427 | } else |
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index 6c8075712974..5931726fcf93 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h | |||
@@ -164,7 +164,6 @@ struct scsi_tape { | |||
164 | int nbr_requests; | 164 | int nbr_requests; |
165 | int nbr_dio; | 165 | int nbr_dio; |
166 | int nbr_pages; | 166 | int nbr_pages; |
167 | int nbr_combinable; | ||
168 | unsigned char last_cmnd[6]; | 167 | unsigned char last_cmnd[6]; |
169 | unsigned char last_sense[16]; | 168 | unsigned char last_sense[16]; |
170 | #endif | 169 | #endif |
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 72f6d8015358..654430edf74d 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c | |||
@@ -461,30 +461,14 @@ static void stex_internal_copy(struct scsi_cmnd *cmd, | |||
461 | } | 461 | } |
462 | } | 462 | } |
463 | 463 | ||
464 | static int stex_direct_copy(struct scsi_cmnd *cmd, | ||
465 | const void *src, size_t count) | ||
466 | { | ||
467 | size_t cp_len = count; | ||
468 | int n_elem = 0; | ||
469 | |||
470 | n_elem = scsi_dma_map(cmd); | ||
471 | if (n_elem < 0) | ||
472 | return 0; | ||
473 | |||
474 | stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD); | ||
475 | |||
476 | scsi_dma_unmap(cmd); | ||
477 | |||
478 | return cp_len == count; | ||
479 | } | ||
480 | |||
481 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | 464 | static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) |
482 | { | 465 | { |
483 | struct st_frame *p; | 466 | struct st_frame *p; |
484 | size_t count = sizeof(struct st_frame); | 467 | size_t count = sizeof(struct st_frame); |
485 | 468 | ||
486 | p = hba->copy_buffer; | 469 | p = hba->copy_buffer; |
487 | stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD); | 470 | stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd), |
471 | ST_FROM_CMD); | ||
488 | memset(p->base, 0, sizeof(u32)*6); | 472 | memset(p->base, 0, sizeof(u32)*6); |
489 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); | 473 | *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); |
490 | p->rom_addr = 0; | 474 | p->rom_addr = 0; |
@@ -502,7 +486,8 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) | |||
502 | p->subid = | 486 | p->subid = |
503 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; | 487 | hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; |
504 | 488 | ||
505 | stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD); | 489 | stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd), |
490 | ST_TO_CMD); | ||
506 | } | 491 | } |
507 | 492 | ||
508 | static void | 493 | static void |
@@ -569,8 +554,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
569 | unsigned char page; | 554 | unsigned char page; |
570 | page = cmd->cmnd[2] & 0x3f; | 555 | page = cmd->cmnd[2] & 0x3f; |
571 | if (page == 0x8 || page == 0x3f) { | 556 | if (page == 0x8 || page == 0x3f) { |
572 | stex_direct_copy(cmd, ms10_caching_page, | 557 | size_t cp_len = sizeof(ms10_caching_page); |
573 | sizeof(ms10_caching_page)); | 558 | stex_internal_copy(cmd, ms10_caching_page, |
559 | &cp_len, scsi_sg_count(cmd), | ||
560 | ST_TO_CMD); | ||
574 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | 561 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
575 | done(cmd); | 562 | done(cmd); |
576 | } else | 563 | } else |
@@ -599,8 +586,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
599 | if (id != host->max_id - 1) | 586 | if (id != host->max_id - 1) |
600 | break; | 587 | break; |
601 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { | 588 | if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { |
602 | stex_direct_copy(cmd, console_inq_page, | 589 | size_t cp_len = sizeof(console_inq_page); |
603 | sizeof(console_inq_page)); | 590 | stex_internal_copy(cmd, console_inq_page, |
591 | &cp_len, scsi_sg_count(cmd), | ||
592 | ST_TO_CMD); | ||
604 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; | 593 | cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; |
605 | done(cmd); | 594 | done(cmd); |
606 | } else | 595 | } else |
@@ -609,6 +598,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
609 | case PASSTHRU_CMD: | 598 | case PASSTHRU_CMD: |
610 | if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { | 599 | if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { |
611 | struct st_drvver ver; | 600 | struct st_drvver ver; |
601 | size_t cp_len = sizeof(ver); | ||
612 | ver.major = ST_VER_MAJOR; | 602 | ver.major = ST_VER_MAJOR; |
613 | ver.minor = ST_VER_MINOR; | 603 | ver.minor = ST_VER_MINOR; |
614 | ver.oem = ST_OEM; | 604 | ver.oem = ST_OEM; |
@@ -616,7 +606,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | |||
616 | ver.signature[0] = PASSTHRU_SIGNATURE; | 606 | ver.signature[0] = PASSTHRU_SIGNATURE; |
617 | ver.console_id = host->max_id - 1; | 607 | ver.console_id = host->max_id - 1; |
618 | ver.host_no = hba->host->host_no; | 608 | ver.host_no = hba->host->host_no; |
619 | cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? | 609 | stex_internal_copy(cmd, &ver, &cp_len, |
610 | scsi_sg_count(cmd), ST_TO_CMD); | ||
611 | cmd->result = sizeof(ver) == cp_len ? | ||
620 | DID_OK << 16 | COMMAND_COMPLETE << 8 : | 612 | DID_OK << 16 | COMMAND_COMPLETE << 8 : |
621 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; | 613 | DID_ERROR << 16 | COMMAND_COMPLETE << 8; |
622 | done(cmd); | 614 | done(cmd); |
@@ -709,7 +701,7 @@ static void stex_copy_data(struct st_ccb *ccb, | |||
709 | if (ccb->cmd == NULL) | 701 | if (ccb->cmd == NULL) |
710 | return; | 702 | return; |
711 | stex_internal_copy(ccb->cmd, | 703 | stex_internal_copy(ccb->cmd, |
712 | resp->variable, &count, ccb->sg_count, ST_TO_CMD); | 704 | resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD); |
713 | } | 705 | } |
714 | 706 | ||
715 | static void stex_ys_commands(struct st_hba *hba, | 707 | static void stex_ys_commands(struct st_hba *hba, |
@@ -734,7 +726,7 @@ static void stex_ys_commands(struct st_hba *hba, | |||
734 | 726 | ||
735 | count = STEX_EXTRA_SIZE; | 727 | count = STEX_EXTRA_SIZE; |
736 | stex_internal_copy(ccb->cmd, hba->copy_buffer, | 728 | stex_internal_copy(ccb->cmd, hba->copy_buffer, |
737 | &count, ccb->sg_count, ST_FROM_CMD); | 729 | &count, scsi_sg_count(ccb->cmd), ST_FROM_CMD); |
738 | inq_data = (ST_INQ *)hba->copy_buffer; | 730 | inq_data = (ST_INQ *)hba->copy_buffer; |
739 | if (inq_data->DeviceTypeQualifier != 0) | 731 | if (inq_data->DeviceTypeQualifier != 0) |
740 | ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; | 732 | ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; |