author     Linus Torvalds <torvalds@linux-foundation.org>    2012-03-22 15:55:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-03-22 15:55:29 -0400
commit     424a6f6ef990b7e9f56f6627bfc6c46b493faeb4 (patch)
tree       0028356ed8003495fbbe1f716f359e3c8ebc35b6 /drivers/scsi/isci
parent     1ab142d499294b844ecc81e8004db4ce029b0b61 (diff)
parent     cd8df932d894f3128c884e3ae1b2b484540513db (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
SCSI updates from James Bottomley:
 "The update includes the usual assortment of driver updates (lpfc, qla2xxx,
  qla4xxx, bfa, bnx2fc, bnx2i, isci, fcoe, hpsa) plus a huge amount of
  infrastructure work in the SAS library and transport class as well as an
  iSCSI update. There's also a new SCSI based virtio driver."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (177 commits)
  [SCSI] qla4xxx: Update driver version to 5.02.00-k15
  [SCSI] qla4xxx: trivial cleanup
  [SCSI] qla4xxx: Fix sparse warning
  [SCSI] qla4xxx: Add support for multiple session per host.
  [SCSI] qla4xxx: Export CHAP index as sysfs attribute
  [SCSI] scsi_transport: Export CHAP index as sysfs attribute
  [SCSI] qla4xxx: Add support to display CHAP list and delete CHAP entry
  [SCSI] iscsi_transport: Add support to display CHAP list and delete CHAP entry
  [SCSI] pm8001: fix endian issue with code optimization.
  [SCSI] pm8001: Fix possible racing condition.
  [SCSI] pm8001: Fix bogus interrupt state flag issue.
  [SCSI] ipr: update PCI ID definitions for new adapters
  [SCSI] qla2xxx: handle default case in qla2x00_request_firmware()
  [SCSI] isci: improvements in driver unloading routine
  [SCSI] isci: improve phy event warnings
  [SCSI] isci: debug, provide state-enum-to-string conversions
  [SCSI] scsi_transport_sas: 'enable' phys on reset
  [SCSI] libsas: don't recover end devices attached to disabled phys
  [SCSI] libsas: fixup target_port_protocols for expanders that don't report sata
  [SCSI] libsas: set attached device type and target protocols for local phys
  ...
Diffstat (limited to 'drivers/scsi/isci')
-rw-r--r--  drivers/scsi/isci/host.c                   17
-rw-r--r--  drivers/scsi/isci/host.h                   19
-rw-r--r--  drivers/scsi/isci/init.c                   24
-rw-r--r--  drivers/scsi/isci/phy.c                   171
-rw-r--r--  drivers/scsi/isci/phy.h                   155
-rw-r--r--  drivers/scsi/isci/port.c                  263
-rw-r--r--  drivers/scsi/isci/port.h                  114
-rw-r--r--  drivers/scsi/isci/registers.h              27
-rw-r--r--  drivers/scsi/isci/remote_device.c          82
-rw-r--r--  drivers/scsi/isci/remote_device.h         212
-rw-r--r--  drivers/scsi/isci/remote_node_context.c    19
-rw-r--r--  drivers/scsi/isci/remote_node_context.h    97
-rw-r--r--  drivers/scsi/isci/request.c               370
-rw-r--r--  drivers/scsi/isci/request.h               228
-rw-r--r--  drivers/scsi/isci/scu_task_context.h       55
-rw-r--r--  drivers/scsi/isci/task.c                  158
-rw-r--r--  drivers/scsi/isci/task.h                   40
17 files changed, 911 insertions, 1140 deletions
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 6ca9b26bb2fb..d4bf9c12ecd4 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -649,15 +649,13 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
 
 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
-	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+	struct isci_host *ihost = ha->lldd_ha;
 
 	if (test_bit(IHOST_START_PENDING, &ihost->flags))
 		return 0;
 
-	/* todo: use sas_flush_discovery once it is upstream */
-	scsi_flush_work(shost);
-
-	scsi_flush_work(shost);
+	sas_drain_work(ha);
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: ihost->status = %d, time = %ld\n",
@@ -1490,6 +1488,15 @@ sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
 {
 	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+	u32 val;
+
+	/* enable clock gating for power control of the scu unit */
+	val = readl(&ihost->smu_registers->clock_gating_control);
+	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
+		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
+		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
+	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
+	writel(val, &ihost->smu_registers->clock_gating_control);
 
 	/* set the default interrupt coalescence number and timeout value. */
 	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 5477f0fa8233..adbad69d1069 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -187,6 +187,7 @@ struct isci_host {
 	int id; /* unique within a given pci device */
 	struct isci_phy phys[SCI_MAX_PHYS];
 	struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+	struct asd_sas_port sas_ports[SCI_MAX_PORTS];
 	struct sas_ha_struct sas_ha;
 
 	spinlock_t state_lock;
@@ -393,24 +394,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
 #define sci_controller_clear_invalid_phy(controller, phy) \
 	((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
 
-static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
-{
-
-	if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
-		return NULL;
-
-	return &iphy->isci_port->isci_host->pdev->dev;
-}
-
-static inline struct device *sciport_to_dev(struct isci_port *iport)
-{
-
-	if (!iport || !iport->isci_host)
-		return NULL;
-
-	return &iport->isci_host->pdev->dev;
-}
-
 static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
 {
 	if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 17c4c2c89c2e..5137db5a5d85 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -60,6 +60,7 @@
 #include <linux/efi.h>
 #include <asm/string.h>
 #include <scsi/scsi_host.h>
+#include "host.h"
 #include "isci.h"
 #include "task.h"
 #include "probe_roms.h"
@@ -154,7 +155,6 @@ static struct scsi_host_template isci_sht = {
 	.queuecommand = sas_queuecommand,
 	.target_alloc = sas_target_alloc,
 	.slave_configure = sas_slave_configure,
-	.slave_destroy = sas_slave_destroy,
 	.scan_finished = isci_host_scan_finished,
 	.scan_start = isci_host_scan_start,
 	.change_queue_depth = sas_change_queue_depth,
@@ -166,9 +166,6 @@ static struct scsi_host_template isci_sht = {
 	.sg_tablesize = SG_ALL,
 	.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
 	.use_clustering = ENABLE_CLUSTERING,
-	.eh_device_reset_handler = sas_eh_device_reset_handler,
-	.eh_bus_reset_handler = isci_bus_reset_handler,
-	.slave_alloc = sas_slave_alloc,
 	.target_destroy = sas_target_destroy,
 	.ioctl = sas_ioctl,
 	.shost_attrs = isci_host_attrs,
@@ -194,6 +191,9 @@ static struct sas_domain_function_template isci_transport_ops = {
 	.lldd_lu_reset = isci_task_lu_reset,
 	.lldd_query_task = isci_task_query_task,
 
+	/* ata recovery called from ata-eh */
+	.lldd_ata_check_ready = isci_ata_check_ready,
+
 	/* Port and Adapter management */
 	.lldd_clear_nexus_port = isci_task_clear_nexus_port,
 	.lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
@@ -242,18 +242,13 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
 	if (!sas_ports)
 		return -ENOMEM;
 
-	/*----------------- Libsas Initialization Stuff----------------------
-	 * Set various fields in the sas_ha struct:
-	 */
-
 	sas_ha->sas_ha_name = DRV_NAME;
 	sas_ha->lldd_module = THIS_MODULE;
 	sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
 
-	/* set the array of phy and port structs. */
 	for (i = 0; i < SCI_MAX_PHYS; i++) {
 		sas_phys[i] = &isci_host->phys[i].sas_phy;
-		sas_ports[i] = &isci_host->ports[i].sas_port;
+		sas_ports[i] = &isci_host->sas_ports[i];
 	}
 
 	sas_ha->sas_phy = sas_phys;
@@ -528,6 +523,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
 			goto err_host_alloc;
 		}
 		pci_info->hosts[i] = h;
+
+		/* turn on DIF support */
+		scsi_host_set_prot(h->shost,
+				   SHOST_DIF_TYPE1_PROTECTION |
+				   SHOST_DIF_TYPE2_PROTECTION |
+				   SHOST_DIF_TYPE3_PROTECTION);
+		scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
@@ -551,9 +553,9 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
 	int i;
 
 	for_each_isci_host(i, ihost, pdev) {
+		wait_for_start(ihost);
 		isci_unregister(ihost);
 		isci_host_deinit(ihost);
-		sci_controller_disable_interrupts(ihost);
 	}
 }
 
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fe18acfd6eb3..fab3586840b5 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -59,6 +59,16 @@
 #include "scu_event_codes.h"
 #include "probe_roms.h"
 
+#undef C
+#define C(a) (#a)
+static const char *phy_state_name(enum sci_phy_states state)
+{
+	static const char * const strings[] = PHY_STATES;
+
+	return strings[state];
+}
+#undef C
+
 /* Maximum arbitration wait time in micro-seconds */
 #define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME  (700)
 
@@ -67,6 +77,19 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
 	return iphy->max_negotiated_speed;
 }
 
+static struct isci_host *phy_to_host(struct isci_phy *iphy)
+{
+	struct isci_phy *table = iphy - iphy->phy_index;
+	struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
+
+	return ihost;
+}
+
+static struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+	return &phy_to_host(iphy)->pdev->dev;
+}
+
 static enum sci_status
 sci_phy_transport_layer_initialization(struct isci_phy *iphy,
 				       struct scu_transport_layer_registers __iomem *reg)
@@ -446,8 +469,8 @@ enum sci_status sci_phy_start(struct isci_phy *iphy)
 	enum sci_phy_states state = iphy->sm.current_state_id;
 
 	if (state != SCI_PHY_STOPPED) {
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -472,8 +495,8 @@ enum sci_status sci_phy_stop(struct isci_phy *iphy)
 	case SCI_PHY_READY:
 		break;
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -486,8 +509,8 @@ enum sci_status sci_phy_reset(struct isci_phy *iphy)
 	enum sci_phy_states state = iphy->sm.current_state_id;
 
 	if (state != SCI_PHY_READY) {
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -536,8 +559,8 @@ enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
 		return SCI_SUCCESS;
 	}
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -591,6 +614,60 @@ static void sci_phy_complete_link_training(struct isci_phy *iphy,
 	sci_change_state(&iphy->sm, next_state);
 }
 
+static const char *phy_event_name(u32 event_code)
+{
+	switch (scu_get_event_code(event_code)) {
+	case SCU_EVENT_PORT_SELECTOR_DETECTED:
+		return "port selector";
+	case SCU_EVENT_SENT_PORT_SELECTION:
+		return "port selection";
+	case SCU_EVENT_HARD_RESET_TRANSMITTED:
+		return "tx hard reset";
+	case SCU_EVENT_HARD_RESET_RECEIVED:
+		return "rx hard reset";
+	case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+		return "identify timeout";
+	case SCU_EVENT_LINK_FAILURE:
+		return "link fail";
+	case SCU_EVENT_SATA_SPINUP_HOLD:
+		return "sata spinup hold";
+	case SCU_EVENT_SAS_15_SSC:
+	case SCU_EVENT_SAS_15:
+		return "sas 1.5";
+	case SCU_EVENT_SAS_30_SSC:
+	case SCU_EVENT_SAS_30:
+		return "sas 3.0";
+	case SCU_EVENT_SAS_60_SSC:
+	case SCU_EVENT_SAS_60:
+		return "sas 6.0";
+	case SCU_EVENT_SATA_15_SSC:
+	case SCU_EVENT_SATA_15:
+		return "sata 1.5";
+	case SCU_EVENT_SATA_30_SSC:
+	case SCU_EVENT_SATA_30:
+		return "sata 3.0";
+	case SCU_EVENT_SATA_60_SSC:
+	case SCU_EVENT_SATA_60:
+		return "sata 6.0";
+	case SCU_EVENT_SAS_PHY_DETECTED:
+		return "sas detect";
+	case SCU_EVENT_SATA_PHY_DETECTED:
+		return "sata detect";
+	default:
+		return "unknown";
+	}
+}
+
+#define phy_event_dbg(iphy, state, code) \
+	dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+		phy_to_host(iphy)->id, iphy->phy_index, \
+		phy_state_name(state), phy_event_name(code), code)
+
+#define phy_event_warn(iphy, state, code) \
+	dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
+		phy_to_host(iphy)->id, iphy->phy_index, \
+		phy_state_name(state), phy_event_name(code), code)
+
 enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 {
 	enum sci_phy_states state = iphy->sm.current_state_id;
@@ -607,11 +684,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		iphy->is_in_link_training = true;
 		break;
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: PHY starting substate machine received "
-			"unexpected event_code %x\n",
-			__func__,
-			event_code);
+		phy_event_dbg(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 	return SCI_SUCCESS;
@@ -648,11 +721,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received "
-			 "unexpected event_code %x\n",
-			 __func__, event_code);
-
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 		break;
 	}
@@ -677,10 +746,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received "
-			 "unexpected event_code %x\n",
-			 __func__, event_code);
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 	return SCI_SUCCESS;
@@ -691,11 +757,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received unexpected "
-			 "event_code %x\n",
-			 __func__,
-			 event_code);
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 	return SCI_SUCCESS;
@@ -719,11 +781,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		break;
 
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received "
-			 "unexpected event_code %x\n",
-			 __func__, event_code);
-
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 	return SCI_SUCCESS;
@@ -751,12 +809,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		sci_phy_start_sas_link_training(iphy);
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received "
-			 "unexpected event_code %x\n",
-			 __func__,
-			 event_code);
-
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 	return SCI_SUCCESS;
@@ -793,11 +846,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		sci_phy_start_sas_link_training(iphy);
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received "
-			 "unexpected event_code %x\n",
-			 __func__, event_code);
-
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 
@@ -815,12 +864,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		break;
 
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: PHY starting substate machine received "
-			 "unexpected event_code %x\n",
-			 __func__,
-			 event_code);
-
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE;
 	}
 	return SCI_SUCCESS;
@@ -838,10 +882,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		iphy->bcn_received_while_port_unassigned = true;
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%sP SCIC PHY 0x%p ready state machine received "
-			 "unexpected event_code %x\n",
-			 __func__, iphy, event_code);
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE_INVALID_STATE;
 	}
 	return SCI_SUCCESS;
@@ -852,18 +893,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
 		sci_change_state(&iphy->sm, SCI_PHY_STARTING);
 		break;
 	default:
-		dev_warn(sciphy_to_dev(iphy),
-			 "%s: SCIC PHY 0x%p resetting state machine received "
-			 "unexpected event_code %x\n",
-			 __func__, iphy, event_code);
-
+		phy_event_warn(iphy, state, event_code);
 		return SCI_FAILURE_INVALID_STATE;
 		break;
 	}
 	return SCI_SUCCESS;
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -956,8 +993,8 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
 		return result;
 	}
 	default:
-		dev_dbg(sciphy_to_dev(iphy),
-			"%s: in wrong state: %d\n", __func__, state);
+		dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
+			__func__, phy_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1299,7 +1336,6 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
 	sas_addr = cpu_to_be64(sci_sas_addr);
 	memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
 
-	iphy->isci_port = NULL;
 	iphy->sas_phy.enabled = 0;
 	iphy->sas_phy.id = index;
 	iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
@@ -1333,13 +1369,13 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 {
 	int ret = 0;
 	struct isci_phy *iphy = sas_phy->lldd_phy;
-	struct isci_port *iport = iphy->isci_port;
+	struct asd_sas_port *port = sas_phy->port;
 	struct isci_host *ihost = sas_phy->ha->lldd_ha;
 	unsigned long flags;
 
 	dev_dbg(&ihost->pdev->dev,
 		"%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
-		__func__, sas_phy, func, buf, iphy, iport);
+		__func__, sas_phy, func, buf, iphy, port);
 
 	switch (func) {
 	case PHY_FUNC_DISABLE:
@@ -1356,11 +1392,10 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 		break;
 
 	case PHY_FUNC_HARD_RESET:
-		if (!iport)
+		if (!port)
 			return -ENODEV;
 
-		/* Perform the port reset. */
-		ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+		ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
 
 		break;
 	case PHY_FUNC_GET_EVENTS: {
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 67699c8e321c..0e45833ba06d 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -103,7 +103,6 @@ struct isci_phy {
 	struct scu_transport_layer_registers __iomem *transport_layer_registers;
 	struct scu_link_layer_registers __iomem *link_layer_registers;
 	struct asd_sas_phy sas_phy;
-	struct isci_port *isci_port;
 	u8 sas_addr[SAS_ADDR_SIZE];
 	union {
 		struct sas_identify_frame iaf;
@@ -344,101 +343,65 @@ enum sci_phy_counter_id {
 	SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
 };
 
-enum sci_phy_states {
-	/**
-	 * Simply the initial state for the base domain state machine.
-	 */
-	SCI_PHY_INITIAL,
-
-	/**
-	 * This state indicates that the phy has successfully been stopped.
-	 * In this state no new IO operations are permitted on this phy.
-	 * This state is entered from the INITIAL state.
-	 * This state is entered from the STARTING state.
-	 * This state is entered from the READY state.
-	 * This state is entered from the RESETTING state.
-	 */
-	SCI_PHY_STOPPED,
-
-	/**
-	 * This state indicates that the phy is in the process of becomming
-	 * ready. In this state no new IO operations are permitted on this phy.
-	 * This state is entered from the STOPPED state.
-	 * This state is entered from the READY state.
-	 * This state is entered from the RESETTING state.
-	 */
-	SCI_PHY_STARTING,
-
-	/**
-	 * Initial state
-	 */
-	SCI_PHY_SUB_INITIAL,
-
-	/**
-	 * Wait state for the hardware OSSP event type notification
-	 */
-	SCI_PHY_SUB_AWAIT_OSSP_EN,
-
-	/**
-	 * Wait state for the PHY speed notification
-	 */
-	SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
-
-	/**
-	 * Wait state for the IAF Unsolicited frame notification
-	 */
-	SCI_PHY_SUB_AWAIT_IAF_UF,
-
-	/**
-	 * Wait state for the request to consume power
-	 */
-	SCI_PHY_SUB_AWAIT_SAS_POWER,
-
-	/**
-	 * Wait state for request to consume power
-	 */
-	SCI_PHY_SUB_AWAIT_SATA_POWER,
-
-	/**
-	 * Wait state for the SATA PHY notification
-	 */
-	SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
-
-	/**
-	 * Wait for the SATA PHY speed notification
-	 */
-	SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
-
-	/**
-	 * Wait state for the SIGNATURE FIS unsolicited frame notification
-	 */
-	SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
-
-	/**
-	 * Exit state for this state machine
-	 */
-	SCI_PHY_SUB_FINAL,
-
-	/**
-	 * This state indicates the the phy is now ready. Thus, the user
-	 * is able to perform IO operations utilizing this phy as long as it
-	 * is currently part of a valid port.
-	 * This state is entered from the STARTING state.
-	 */
-	SCI_PHY_READY,
-
-	/**
-	 * This state indicates that the phy is in the process of being reset.
-	 * In this state no new IO operations are permitted on this phy.
-	 * This state is entered from the READY state.
-	 */
-	SCI_PHY_RESETTING,
-
-	/**
-	 * Simply the final state for the base phy state machine.
-	 */
-	SCI_PHY_FINAL,
-};
+/**
+ * enum sci_phy_states - phy state machine states
+ * @SCI_PHY_INITIAL: Simply the initial state for the base domain state
+ *                   machine.
+ * @SCI_PHY_STOPPED: phy has successfully been stopped. In this state
+ *                   no new IO operations are permitted on this phy.
+ * @SCI_PHY_STARTING: the phy is in the process of becomming ready. In
+ *                    this state no new IO operations are permitted on
+ *                    this phy.
+ * @SCI_PHY_SUB_INITIAL: Initial state
+ * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
+ *                             type notification
+ * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
+ *                                  notification
+ * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
+ *                            notification
+ * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
+ *                               power
+ * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
+ *                                power
+ * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
+ *                                 notification
+ * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
+ *                                   notification
+ * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
+ *                                unsolicited frame notification
+ * @SCI_PHY_SUB_FINAL: Exit state for this state machine
+ * @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform
+ *                 IO operations utilizing this phy as long as it is
+ *                 currently part of a valid port. This state is
+ *                 entered from the STARTING state.
+ * @SCI_PHY_RESETTING: phy is in the process of being reset. In this
+ *                     state no new IO operations are permitted on this
+ *                     phy. This state is entered from the READY state.
+ * @SCI_PHY_FINAL: Simply the final state for the base phy state
+ *                 machine.
+ */
+#define PHY_STATES {\
+	C(PHY_INITIAL),\
+	C(PHY_STOPPED),\
+	C(PHY_STARTING),\
+	C(PHY_SUB_INITIAL),\
+	C(PHY_SUB_AWAIT_OSSP_EN),\
+	C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
+	C(PHY_SUB_AWAIT_IAF_UF),\
+	C(PHY_SUB_AWAIT_SAS_POWER),\
+	C(PHY_SUB_AWAIT_SATA_POWER),\
+	C(PHY_SUB_AWAIT_SATA_PHY_EN),\
+	C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
+	C(PHY_SUB_AWAIT_SIG_FIS_UF),\
+	C(PHY_SUB_FINAL),\
+	C(PHY_READY),\
+	C(PHY_RESETTING),\
+	C(PHY_FINAL),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_phy_states PHY_STATES;
+#undef C
 
 void sci_phy_construct(
 	struct isci_phy *iphy,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 7c6ac58a5c4c..5fada73b71ff 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -60,18 +60,29 @@
 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
 #define SCU_DUMMY_INDEX (0xFFFF)
 
-static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+#undef C
+#define C(a) (#a)
+const char *port_state_name(enum sci_port_states state)
 {
-	unsigned long flags;
+	static const char * const strings[] = PORT_STATES;
+
+	return strings[state];
+}
+#undef C
+
+static struct device *sciport_to_dev(struct isci_port *iport)
+{
+	int i = iport->physical_port_index;
+	struct isci_port *table;
+	struct isci_host *ihost;
+
+	if (i == SCIC_SDS_DUMMY_PORT)
+		i = SCI_MAX_PORTS+1;
 
-	dev_dbg(&iport->isci_host->pdev->dev,
-		"%s: iport = %p, state = 0x%x\n",
-		__func__, iport, status);
+	table = iport - i;
+	ihost = container_of(table, typeof(*ihost), ports[0]);
 
-	/* XXX pointless lock */
-	spin_lock_irqsave(&iport->state_lock, flags);
-	iport->status = status;
-	spin_unlock_irqrestore(&iport->state_lock, flags);
+	return &ihost->pdev->dev;
 }
 
 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
@@ -165,18 +176,12 @@ static void isci_port_link_up(struct isci_host *isci_host,
 	struct sci_port_properties properties;
 	unsigned long success = true;
 
-	BUG_ON(iphy->isci_port != NULL);
-
-	iphy->isci_port = iport;
-
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: isci_port = %p\n",
 		__func__, iport);
 
 	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
 
-	isci_port_change_state(iphy->isci_port, isci_starting);
-
 	sci_port_get_properties(iport, &properties);
 
 	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
@@ -258,7 +263,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
 				__func__, isci_device);
 				set_bit(IDEV_GONE, &isci_device->flags);
 			}
-			isci_port_change_state(isci_port, isci_stopping);
 		}
 	}
 
@@ -269,52 +273,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
 	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
 					   PHYE_LOSS_OF_SIGNAL);
 
-	isci_phy->isci_port = NULL;
-
 	dev_dbg(&isci_host->pdev->dev,
 		"%s: isci_port = %p - Done\n", __func__, isci_port);
 }
 
-
-/**
- * isci_port_ready() - This function is called by the sci core when a link
- * becomes ready.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the sci port with the active link.
- *
- */
-static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
-{
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_port = %p\n", __func__, isci_port);
-
-	complete_all(&isci_port->start_complete);
-	isci_port_change_state(isci_port, isci_ready);
-	return;
-}
-
-/**
- * isci_port_not_ready() - This function is called by the sci core when a link
- * is not ready. All remote devices on this link will be removed if they are
- * in the stopping state.
- * @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the sci port with the active link.
- *
- */
-static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
-{
-	dev_dbg(&isci_host->pdev->dev,
-		"%s: isci_port = %p\n", __func__, isci_port);
-}
-
-static void isci_port_stop_complete(struct isci_host *ihost,
-				    struct isci_port *iport,
-				    enum sci_status completion_status)
-{
-	dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
-}
-
-
 static bool is_port_ready_state(enum sci_port_states state)
 {
 	switch (state) {
@@ -353,7 +315,9 @@ static void port_state_machine_change(struct isci_port *iport,
 static void isci_port_hard_reset_complete(struct isci_port *isci_port,
 					   enum sci_status completion_status)
 {
-	dev_dbg(&isci_port->isci_host->pdev->dev,
+	struct isci_host *ihost = isci_port->owning_controller;
+
+	dev_dbg(&ihost->pdev->dev,
 		"%s: isci_port = %p, completion_status=%x\n",
 		__func__, isci_port, completion_status);
 
@@ -364,23 +328,24 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
 
 	/* The reset failed. The port state is now SCI_PORT_FAILED. */
 	if (isci_port->active_phy_mask == 0) {
+		int phy_idx = isci_port->last_active_phy;
+		struct isci_phy *iphy = &ihost->phys[phy_idx];
 
 		/* Generate the link down now to the host, since it
 		 * was intercepted by the hard reset state machine when
 		 * it really happened.
 		 */
-		isci_port_link_down(isci_port->isci_host,
-				    &isci_port->isci_host->phys[
-					   isci_port->last_active_phy],
-				    isci_port);
+		isci_port_link_down(ihost, iphy, isci_port);
 	}
 	/* Advance the port state so that link state changes will be
 	 * noticed.
 	 */
 	port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
 
 	}
-	complete_all(&isci_port->hard_reset_complete);
+	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
+	wake_up(&ihost->eventq);
+
 }
 
 /* This method will return a true value if the specified phy can be assigned to
@@ -835,10 +800,9 @@ static void port_timeout(unsigned long data)
 			__func__,
 			iport);
 	} else if (current_state == SCI_PORT_STOPPING) {
-		/* if the port is still stopping then the stop has not completed */
-		isci_port_stop_complete(iport->owning_controller,
-					iport,
-					SCI_FAILURE_TIMEOUT);
+		dev_dbg(sciport_to_dev(iport),
+			"%s: port%d: stop complete timeout\n",
+			__func__, iport->physical_port_index);
 	} else {
 		/* The port is in the ready state and we have a timer
 		 * reporting a timeout this should not happen.
@@ -1003,7 +967,8 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
 	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
 	struct isci_host *ihost = iport->owning_controller;
 
-	isci_port_ready(ihost, iport);
+	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
+		__func__, iport->physical_port_index);
 
 	for (index = 0; index < SCI_MAX_PHYS; index++) {
 		if (iport->phy_table[index]) {
@@ -1069,7 +1034,8 @@ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machi
 	 */
 	sci_port_abort_dummy_request(iport);
 
-	isci_port_not_ready(ihost, iport);
+	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+		__func__, iport->physical_port_index);
 
 	if (iport->ready_exit)
 		sci_port_invalidate_dummy_remote_node(iport);
@@ -1081,7 +1047,8 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
 	struct isci_host *ihost = iport->owning_controller;
 
 	if (iport->active_phy_mask == 0) {
-		isci_port_not_ready(ihost, iport);
+		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+			__func__, iport->physical_port_index);
 
 		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
 	} else
@@ -1097,8 +1064,8 @@ enum sci_status sci_port_start(struct isci_port *iport)
 
 	state = iport->sm.current_state_id;
 	if (state != SCI_PORT_STOPPED) {
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1172,8 +1139,8 @@ enum sci_status sci_port_stop(struct isci_port *iport)
 				      SCI_PORT_STOPPING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1187,8 +1154,8 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
 
 	state = iport->sm.current_state_id;
 	if (state != SCI_PORT_SUB_OPERATIONAL) {
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 
@@ -1282,8 +1249,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 				      SCI_PORT_SUB_CONFIGURING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1332,8 +1299,8 @@ enum sci_status sci_port_remove_phy(struct isci_port *iport,
 				      SCI_PORT_SUB_CONFIGURING);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1375,8 +1342,8 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
 		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1405,8 +1372,8 @@ enum sci_status sci_port_link_down(struct isci_port *iport,
 		sci_port_deactivate_phy(iport, iphy, false);
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1425,8 +1392,8 @@ enum sci_status sci_port_start_io(struct isci_port *iport,
 		iport->started_request_count++;
 		return SCI_SUCCESS;
 	default:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	}
 }
@@ -1440,8 +1407,8 @@ enum sci_status sci_port_complete_io(struct isci_port *iport,
 	state = iport->sm.current_state_id;
 	switch (state) {
 	case SCI_PORT_STOPPED:
-		dev_warn(sciport_to_dev(iport),
-			 "%s: in wrong state: %d\n", __func__, state);
+		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
+			 __func__, port_state_name(state));
 		return SCI_FAILURE_INVALID_STATE;
 	case SCI_PORT_STOPPING:
 		sci_port_decrement_request_count(iport);
@@ -1547,7 +1514,8 @@ static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
 	if (prev_state == SCI_PORT_RESETTING)
 		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
 	else
-		isci_port_not_ready(ihost, iport);
+		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
+			__func__, iport->physical_port_index);
 
 	/* Post and suspend the dummy remote node context for this port. */
 	sci_port_post_dummy_remote_node(iport);
@@ -1644,22 +1612,7 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
 {
 	INIT_LIST_HEAD(&iport->remote_dev_list);
 	INIT_LIST_HEAD(&iport->domain_dev_list);
-	spin_lock_init(&iport->state_lock);
-	init_completion(&iport->start_complete);
 	iport->isci_host = ihost;
-	isci_port_change_state(iport, isci_freed);
-}
-
-/**
- * isci_port_get_state() - This function gets the status of the port object.
- * @isci_port: This parameter points to the isci_port object
- *
- * status of the object as a isci_status enum.
- */
-enum isci_status isci_port_get_state(
-	struct isci_port *isci_port)
-{
-	return isci_port->status;
 }
 
 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
@@ -1670,6 +1623,11 @@ void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy
 	isci_port_bc_change_received(ihost, iport, iphy);
 }
 
+static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
+{
+	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
+}
+
 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
 				 struct isci_phy *iphy)
 {
@@ -1680,9 +1638,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
 		__func__, iport);
 
-	init_completion(&iport->hard_reset_complete);
-
 	spin_lock_irqsave(&ihost->scic_lock, flags);
+	set_bit(IPORT_RESET_PENDING, &iport->state);
 
 	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
 	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
@@ -1690,7 +1647,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 	spin_unlock_irqrestore(&ihost->scic_lock, flags);
 
 	if (status == SCI_SUCCESS) {
-		wait_for_completion(&iport->hard_reset_complete);
+		wait_port_reset(ihost, iport);
 
 		dev_dbg(&ihost->pdev->dev,
 			"%s: iport = %p; hard reset completion\n",
@@ -1704,6 +1661,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 			__func__, iport, iport->hard_reset_status);
 		}
 	} else {
+		clear_bit(IPORT_RESET_PENDING, &iport->state);
+		wake_up(&ihost->eventq);
 		ret = TMF_RESP_FUNC_FAILED;
 
 		dev_err(&ihost->pdev->dev,
@@ -1726,24 +1685,80 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
 	return ret;
 }
 
-/**
- * isci_port_deformed() - This function is called by libsas when a port becomes
- * inactive.
- * @phy: This parameter specifies the libsas phy with the inactive port.
- *
- */
+int isci_ata_check_ready(struct domain_device *dev)
+{
+	struct isci_port *iport = dev->port->lldd_port;
+	struct isci_host *ihost = dev_to_ihost(dev);
+	struct isci_remote_device *idev;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	idev = isci_lookup_device(dev);
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (!idev)
+		goto out;
+
+	if (test_bit(IPORT_RESET_PENDING, &iport->state))
+		goto out;
+
+	rc = !!iport->active_phy_mask;
+ out:
+	isci_put_device(idev);
+
+	return rc;
+}
+
 void isci_port_deformed(struct asd_sas_phy *phy)
 {
-	pr_debug("%s: sas_phy = %p\n", __func__, phy);
+	struct isci_host *ihost = phy->ha->lldd_ha;
+	struct isci_port *iport = phy->port->lldd_port;
+	unsigned long flags;
+	int i;
+
+	/* we got a port notification on a port that was subsequently
+	 * torn down and libsas is just now catching up
+	 */
+	if (!iport)
+		return;
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	for (i = 0; i < SCI_MAX_PHYS; i++) {
+		if (iport->active_phy_mask & 1 << i)
+			break;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (i >= SCI_MAX_PHYS)
+		dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
+			__func__, (long) (iport - &ihost->ports[0]));
 }
 
-/**
- * isci_port_formed() - This function is called by libsas when a port becomes
- * active.
- * @phy: This parameter specifies the libsas phy with the active port.
- *
- */
 void isci_port_formed(struct asd_sas_phy *phy)
 {
-	pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+	struct isci_host *ihost = phy->ha->lldd_ha;
+	struct isci_phy *iphy = to_iphy(phy);
+	struct asd_sas_port *port = phy->port;
+	struct isci_port *iport;
+	unsigned long flags;
+	int i;
+
+	/* initial ports are formed as the driver is still initializing,
+	 * wait for that process to complete
+	 */
+	wait_for_start(ihost);
+
+	spin_lock_irqsave(&ihost->scic_lock, flags);
+	for (i = 0; i < SCI_MAX_PORTS; i++) {
+		iport = &ihost->ports[i];
+		if (iport->active_phy_mask & 1 << iphy->phy_index)
+			break;
+	}
+	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+	if (i >= SCI_MAX_PORTS)
+		iport = NULL;
+
+	port->lldd_port = iport;
 }
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 08116090eb70..6b56240c2051 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -95,14 +95,11 @@ enum isci_status {
  * @timer: timeout start/stop operations
  */
 struct isci_port {
-	enum isci_status status;
 	struct isci_host *isci_host;
-	struct asd_sas_port sas_port;
 	struct list_head remote_dev_list;
-	spinlock_t state_lock;
 	struct list_head domain_dev_list;
-	struct completion start_complete;
-	struct completion hard_reset_complete;
+	#define IPORT_RESET_PENDING 0
+	unsigned long state;
 	enum sci_status hard_reset_status;
 	struct sci_base_state_machine sm;
 	bool ready_exit;
@@ -147,70 +144,47 @@ struct sci_port_properties {
 };
 
 /**
- * enum sci_port_states - This enumeration depicts all the states for the
- *    common port state machine.
- *
- *
+ * enum sci_port_states - port state machine states
+ * @SCI_PORT_STOPPED: port has successfully been stopped. In this state
+ *                    no new IO operations are permitted. This state is
+ *                    entered from the STOPPING state.
+ * @SCI_PORT_STOPPING: port is in the process of stopping. In this
+ *                     state no new IO operations are permitted, but
+ *                     existing IO operations are allowed to complete.
+ *                     This state is entered from the READY state.
+ * @SCI_PORT_READY: port is now ready. Thus, the user is able to
+ *                  perform IO operations on this port. This state is
+ *                  entered from the STARTING state.
+ * @SCI_PORT_SUB_WAITING: port is started and ready but has no active
+ *                        phys.
+ * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
+ *                            least one phy operational.
+ * @SCI_PORT_SUB_CONFIGURING: port is started and there was an
+ *                            add/remove phy event. This state is only
+ *                            used in Automatic Port Configuration Mode
+ *                            (APC)
+ * @SCI_PORT_RESETTING: port is in the process of performing a hard
+ *                      reset. Thus, the user is unable to perform IO
+ *                      operations on this port. This state is entered
+ *                      from the READY state.
+ * @SCI_PORT_FAILED: port has failed a reset request. This state is
+ *                   entered when a port reset request times out. This
+ *                   state is entered from the RESETTING state.
  */
-enum sci_port_states {
-	/**
-	 * This state indicates that the port has successfully been stopped.
-	 * In this state no new IO operations are permitted.
-	 * This state is entered from the STOPPING state.
-	 */
-	SCI_PORT_STOPPED,
-
-	/**
-	 * This state indicates that the port is in the process of stopping.
-	 * In this state no new IO operations are permitted, but existing IO
-	 * operations are allowed to complete.
-	 * This state is entered from the READY state.
-	 */
-	SCI_PORT_STOPPING,
-
-	/**
-	 * This state indicates the port is now ready. Thus, the user is
-	 * able to perform IO operations on this port.
-	 * This state is entered from the STARTING state.
-	 */
-	SCI_PORT_READY,
-
-	/**
-	 * The substate where the port is started and ready but has no
-	 * active phys.
-	 */
-	SCI_PORT_SUB_WAITING,
-
-	/**
-	 * The substate where the port is started and ready and there is
-	 * at least one phy operational.
-	 */
-	SCI_PORT_SUB_OPERATIONAL,
-
-	/**
-	 * The substate where the port is started and there was an
-	 * add/remove phy event. This state is only used in Automatic
-	 * Port Configuration Mode (APC)
-	 */
-	SCI_PORT_SUB_CONFIGURING,
-
-	/**
-	 * This state indicates the port is in the process of performing a hard
-	 * reset. Thus, the user is unable to perform IO operations on this
-	 * port.
-	 * This state is entered from the READY state.
-	 */
-	SCI_PORT_RESETTING,
-
-	/**
-	 * This state indicates the port has failed a reset request. This state
-	 * is entered when a port reset request times out.
-	 * This state is entered from the RESETTING state.
-	 */
-	SCI_PORT_FAILED,
-
-
-};
+#define PORT_STATES {\
+	C(PORT_STOPPED),\
+	C(PORT_STOPPING),\
+	C(PORT_READY),\
+	C(PORT_SUB_WAITING),\
+	C(PORT_SUB_OPERATIONAL),\
+	C(PORT_SUB_CONFIGURING),\
+	C(PORT_RESETTING),\
+	C(PORT_FAILED),\
+	}
+#undef C
+#define C(a) SCI_##a
+enum sci_port_states PORT_STATES;
+#undef C
 
 static inline void sci_port_decrement_request_count(struct isci_port *iport)
 {
@@ -296,9 +270,6 @@ void sci_port_get_attached_sas_address(
296 struct isci_port *iport, 270 struct isci_port *iport,
297 struct sci_sas_address *sas_address); 271 struct sci_sas_address *sas_address);
298 272
299enum isci_status isci_port_get_state(
300 struct isci_port *isci_port);
301
302void isci_port_formed(struct asd_sas_phy *); 273void isci_port_formed(struct asd_sas_phy *);
303void isci_port_deformed(struct asd_sas_phy *); 274void isci_port_deformed(struct asd_sas_phy *);
304 275
@@ -309,4 +280,5 @@ void isci_port_init(
309 280
310int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, 281int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
311 struct isci_phy *iphy); 282 struct isci_phy *iphy);
283int isci_ata_check_ready(struct domain_device *dev);
312#endif /* !defined(_ISCI_PORT_H_) */ 284#endif /* !defined(_ISCI_PORT_H_) */
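
The PORT_STATES block above (and the matching REMOTE_DEV_STATES, RNC_STATES and REQUEST_STATES blocks later in this series) is an X-macro list: the same list is expanded once with C(a) defined as SCI_##a to produce the enum members, and once with C(a) defined as (#a) to produce a parallel string table that the new *_state_name() helpers index. A minimal standalone sketch of the pattern, using a cut-down three-state list and illustrative names (EXAMPLE_STATES, example_state_names) rather than the driver's own:

#define EXAMPLE_STATES {\
	C(PORT_STOPPED),\
	C(PORT_READY),\
	C(PORT_FAILED),\
}

/* first expansion: enum members SCI_PORT_STOPPED, SCI_PORT_READY, ... */
#define C(a) SCI_##a
enum example_port_states EXAMPLE_STATES;
#undef C

/* second expansion: matching names "PORT_STOPPED", "PORT_READY", ... */
#define C(a) (#a)
static const char * const example_state_names[] = EXAMPLE_STATES;
#undef C

/* example_state_names[SCI_PORT_READY] is "PORT_READY" */

Because both expansions come from the same list, the enum and the name table cannot drift apart when a state is added or removed.
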
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index eaa541afc755..7eb0ccd45fe6 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -370,6 +370,27 @@ struct scu_iit_entry {
370 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \ 370 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
371 ) 371 )
372 372
373/* ***************************************************************************** */
374#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0)
375#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001)
376#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1)
377#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002)
378#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2)
379#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004)
380#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3)
381#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008)
382#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16)
383#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000)
384#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31)
385#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000)
386#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0)
387
388#define SMU_CGUCR_GEN_VAL(name, value) \
389 SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
390
391#define SMU_CGUCR_GEN_BIT(name) \
392 SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
393
373/* -------------------------------------------------------------------------- */ 394/* -------------------------------------------------------------------------- */
374 395
375#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0) 396#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
@@ -992,8 +1013,10 @@ struct smu_registers {
992 u32 mmr_address_window; 1013 u32 mmr_address_window;
993/* 0x00A4 SMDW */ 1014/* 0x00A4 SMDW */
994 u32 mmr_data_window; 1015 u32 mmr_data_window;
995 u32 reserved_A8; 1016/* 0x00A8 CGUCR */
996 u32 reserved_AC; 1017 u32 clock_gating_control;
1018/* 0x00AC CGUPC */
1019 u32 clock_gating_performance;
997/* A whole bunch of reserved space */ 1020/* A whole bunch of reserved space */
998 u32 reserved_Bx[4]; 1021 u32 reserved_Bx[4];
999 u32 reserved_Cx[4]; 1022 u32 reserved_Cx[4];
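
The CGUCR definitions added above follow the header's existing SHIFT/MASK convention, with SMU_CGUCR_GEN_VAL()/SMU_CGUCR_GEN_BIT() wrapping the generic SCU_GEN_VALUE()/SCU_GEN_BIT() helpers. Assuming those helpers shift a value into the named field and mask it (as their names suggest), building a value for the new clock_gating_control register could look like the sketch below; the idle-timeout code 0x4 and the smu pointer are illustrative, not taken from the driver:

/* sketch only: smu is a hypothetical struct smu_registers __iomem * */
u32 cgucr;

cgucr = SMU_CGUCR_GEN_BIT(IDLE_ENABLE) |
	SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
	SMU_CGUCR_GEN_VAL(IDLE_TIMEOUT, 0x4);
writel(cgucr, &smu->clock_gating_control);
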
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index dd74b6ceeb82..8f501b0a81d6 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -62,6 +62,16 @@
62#include "scu_event_codes.h" 62#include "scu_event_codes.h"
63#include "task.h" 63#include "task.h"
64 64
65#undef C
66#define C(a) (#a)
67const char *dev_state_name(enum sci_remote_device_states state)
68{
69 static const char * const strings[] = REMOTE_DEV_STATES;
70
71 return strings[state];
72}
73#undef C
74
65/** 75/**
66 * isci_remote_device_not_ready() - This function is called by the ihost when 76 * isci_remote_device_not_ready() - This function is called by the ihost when
67 * the remote device is not ready. We mark the isci device as ready (not 77 * the remote device is not ready. We mark the isci device as ready (not
@@ -167,8 +177,8 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
167 case SCI_DEV_FAILED: 177 case SCI_DEV_FAILED:
168 case SCI_DEV_FINAL: 178 case SCI_DEV_FINAL:
169 default: 179 default:
170 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 180 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
171 __func__, state); 181 __func__, dev_state_name(state));
172 return SCI_FAILURE_INVALID_STATE; 182 return SCI_FAILURE_INVALID_STATE;
173 case SCI_DEV_STOPPED: 183 case SCI_DEV_STOPPED:
174 return SCI_SUCCESS; 184 return SCI_SUCCESS;
@@ -226,8 +236,8 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
226 case SCI_DEV_RESETTING: 236 case SCI_DEV_RESETTING:
227 case SCI_DEV_FINAL: 237 case SCI_DEV_FINAL:
228 default: 238 default:
229 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 239 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
230 __func__, state); 240 __func__, dev_state_name(state));
231 return SCI_FAILURE_INVALID_STATE; 241 return SCI_FAILURE_INVALID_STATE;
232 case SCI_DEV_READY: 242 case SCI_DEV_READY:
233 case SCI_STP_DEV_IDLE: 243 case SCI_STP_DEV_IDLE:
@@ -246,8 +256,8 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
246 enum sci_remote_device_states state = sm->current_state_id; 256 enum sci_remote_device_states state = sm->current_state_id;
247 257
248 if (state != SCI_DEV_RESETTING) { 258 if (state != SCI_DEV_RESETTING) {
249 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 259 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
250 __func__, state); 260 __func__, dev_state_name(state));
251 return SCI_FAILURE_INVALID_STATE; 261 return SCI_FAILURE_INVALID_STATE;
252 } 262 }
253 263
@@ -262,8 +272,8 @@ enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
262 enum sci_remote_device_states state = sm->current_state_id; 272 enum sci_remote_device_states state = sm->current_state_id;
263 273
264 if (state != SCI_STP_DEV_CMD) { 274 if (state != SCI_STP_DEV_CMD) {
265 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 275 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
266 __func__, state); 276 __func__, dev_state_name(state));
267 return SCI_FAILURE_INVALID_STATE; 277 return SCI_FAILURE_INVALID_STATE;
268 } 278 }
269 279
@@ -287,8 +297,8 @@ enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
287 case SCI_SMP_DEV_IDLE: 297 case SCI_SMP_DEV_IDLE:
288 case SCI_DEV_FINAL: 298 case SCI_DEV_FINAL:
289 default: 299 default:
290 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 300 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
291 __func__, state); 301 __func__, dev_state_name(state));
292 /* Return the frame back to the controller */ 302 /* Return the frame back to the controller */
293 sci_controller_release_frame(ihost, frame_index); 303 sci_controller_release_frame(ihost, frame_index);
294 return SCI_FAILURE_INVALID_STATE; 304 return SCI_FAILURE_INVALID_STATE;
@@ -502,8 +512,8 @@ enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
502 case SCI_DEV_RESETTING: 512 case SCI_DEV_RESETTING:
503 case SCI_DEV_FINAL: 513 case SCI_DEV_FINAL:
504 default: 514 default:
505 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 515 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
506 __func__, state); 516 __func__, dev_state_name(state));
507 return SCI_FAILURE_INVALID_STATE; 517 return SCI_FAILURE_INVALID_STATE;
508 case SCI_DEV_READY: 518 case SCI_DEV_READY:
509 /* attempt to start an io request for this device object. The remote 519 /* attempt to start an io request for this device object. The remote
@@ -637,8 +647,8 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
637 case SCI_DEV_FAILED: 647 case SCI_DEV_FAILED:
638 case SCI_DEV_FINAL: 648 case SCI_DEV_FINAL:
639 default: 649 default:
640 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 650 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
641 __func__, state); 651 __func__, dev_state_name(state));
642 return SCI_FAILURE_INVALID_STATE; 652 return SCI_FAILURE_INVALID_STATE;
643 case SCI_DEV_READY: 653 case SCI_DEV_READY:
644 case SCI_STP_DEV_AWAIT_RESET: 654 case SCI_STP_DEV_AWAIT_RESET:
@@ -721,8 +731,8 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
721 case SCI_DEV_RESETTING: 731 case SCI_DEV_RESETTING:
722 case SCI_DEV_FINAL: 732 case SCI_DEV_FINAL:
723 default: 733 default:
724 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 734 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
725 __func__, state); 735 __func__, dev_state_name(state));
726 return SCI_FAILURE_INVALID_STATE; 736 return SCI_FAILURE_INVALID_STATE;
727 case SCI_STP_DEV_IDLE: 737 case SCI_STP_DEV_IDLE:
728 case SCI_STP_DEV_CMD: 738 case SCI_STP_DEV_CMD:
@@ -853,8 +863,8 @@ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *ide
853 struct isci_host *ihost; 863 struct isci_host *ihost;
854 864
855 if (state != SCI_DEV_STOPPED) { 865 if (state != SCI_DEV_STOPPED) {
856 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 866 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
857 __func__, state); 867 __func__, dev_state_name(state));
858 return SCI_FAILURE_INVALID_STATE; 868 return SCI_FAILURE_INVALID_STATE;
859 } 869 }
860 870
@@ -1204,8 +1214,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1204 enum sci_status status; 1214 enum sci_status status;
1205 1215
1206 if (state != SCI_DEV_STOPPED) { 1216 if (state != SCI_DEV_STOPPED) {
1207 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 1217 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1208 __func__, state); 1218 __func__, dev_state_name(state));
1209 return SCI_FAILURE_INVALID_STATE; 1219 return SCI_FAILURE_INVALID_STATE;
1210 } 1220 }
1211 1221
@@ -1308,7 +1318,6 @@ void isci_remote_device_release(struct kref *kref)
1308 clear_bit(IDEV_STOP_PENDING, &idev->flags); 1318 clear_bit(IDEV_STOP_PENDING, &idev->flags);
1309 clear_bit(IDEV_IO_READY, &idev->flags); 1319 clear_bit(IDEV_IO_READY, &idev->flags);
1310 clear_bit(IDEV_GONE, &idev->flags); 1320 clear_bit(IDEV_GONE, &idev->flags);
1311 clear_bit(IDEV_EH, &idev->flags);
1312 smp_mb__before_clear_bit(); 1321 smp_mb__before_clear_bit();
1313 clear_bit(IDEV_ALLOCATED, &idev->flags); 1322 clear_bit(IDEV_ALLOCATED, &idev->flags);
1314 wake_up(&ihost->eventq); 1323 wake_up(&ihost->eventq);
@@ -1381,34 +1390,17 @@ void isci_remote_device_gone(struct domain_device *dev)
1381 * 1390 *
1382 * status, zero indicates success. 1391 * status, zero indicates success.
1383 */ 1392 */
1384int isci_remote_device_found(struct domain_device *domain_dev) 1393int isci_remote_device_found(struct domain_device *dev)
1385{ 1394{
1386 struct isci_host *isci_host = dev_to_ihost(domain_dev); 1395 struct isci_host *isci_host = dev_to_ihost(dev);
1387 struct isci_port *isci_port; 1396 struct isci_port *isci_port = dev->port->lldd_port;
1388 struct isci_phy *isci_phy;
1389 struct asd_sas_port *sas_port;
1390 struct asd_sas_phy *sas_phy;
1391 struct isci_remote_device *isci_device; 1397 struct isci_remote_device *isci_device;
1392 enum sci_status status; 1398 enum sci_status status;
1393 1399
1394 dev_dbg(&isci_host->pdev->dev, 1400 dev_dbg(&isci_host->pdev->dev,
1395 "%s: domain_device = %p\n", __func__, domain_dev); 1401 "%s: domain_device = %p\n", __func__, dev);
1396
1397 wait_for_start(isci_host);
1398
1399 sas_port = domain_dev->port;
1400 sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
1401 port_phy_el);
1402 isci_phy = to_iphy(sas_phy);
1403 isci_port = isci_phy->isci_port;
1404
1405 /* we are being called for a device on this port,
1406 * so it has to come up eventually
1407 */
1408 wait_for_completion(&isci_port->start_complete);
1409 1402
1410 if ((isci_stopping == isci_port_get_state(isci_port)) || 1403 if (!isci_port)
1411 (isci_stopped == isci_port_get_state(isci_port)))
1412 return -ENODEV; 1404 return -ENODEV;
1413 1405
1414 isci_device = isci_remote_device_alloc(isci_host, isci_port); 1406 isci_device = isci_remote_device_alloc(isci_host, isci_port);
@@ -1419,7 +1411,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
1419 INIT_LIST_HEAD(&isci_device->node); 1411 INIT_LIST_HEAD(&isci_device->node);
1420 1412
1421 spin_lock_irq(&isci_host->scic_lock); 1413 spin_lock_irq(&isci_host->scic_lock);
1422 isci_device->domain_dev = domain_dev; 1414 isci_device->domain_dev = dev;
1423 isci_device->isci_port = isci_port; 1415 isci_device->isci_port = isci_port;
1424 list_add_tail(&isci_device->node, &isci_port->remote_dev_list); 1416 list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1425 1417
@@ -1432,7 +1424,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
1432 1424
1433 if (status == SCI_SUCCESS) { 1425 if (status == SCI_SUCCESS) {
1434 /* device came up, advertise it to the world */ 1426 /* device came up, advertise it to the world */
1435 domain_dev->lldd_dev = isci_device; 1427 dev->lldd_dev = isci_device;
1436 } else 1428 } else
1437 isci_put_device(isci_device); 1429 isci_put_device(isci_device);
1438 spin_unlock_irq(&isci_host->scic_lock); 1430 spin_unlock_irq(&isci_host->scic_lock);
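
Every dev_warn() conversion in this file follows the same shape: the raw enum value in the format string is replaced by the symbolic name looked up through the new dev_state_name() table. A short sketch of what that buys, with the state value chosen purely for illustration:

enum sci_remote_device_states state = SCI_STP_DEV_NCQ_ERROR;

/* before: "...: in wrong state: <numeric enum value>" */
/* after:  "...: in wrong state: STP_DEV_NCQ_ERROR"    */
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
	 __func__, dev_state_name(state));
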
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 483ee50152f3..58637ee08f55 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -82,10 +82,9 @@ struct isci_remote_device {
82 #define IDEV_START_PENDING 0 82 #define IDEV_START_PENDING 0
83 #define IDEV_STOP_PENDING 1 83 #define IDEV_STOP_PENDING 1
84 #define IDEV_ALLOCATED 2 84 #define IDEV_ALLOCATED 2
85 #define IDEV_EH 3 85 #define IDEV_GONE 3
86 #define IDEV_GONE 4 86 #define IDEV_IO_READY 4
87 #define IDEV_IO_READY 5 87 #define IDEV_IO_NCQERROR 5
88 #define IDEV_IO_NCQERROR 6
89 unsigned long flags; 88 unsigned long flags;
90 struct kref kref; 89 struct kref kref;
91 struct isci_port *isci_port; 90 struct isci_port *isci_port;
@@ -180,122 +179,101 @@ enum sci_status sci_remote_device_reset_complete(
180/** 179/**
181 * enum sci_remote_device_states - This enumeration depicts all the states 180 * enum sci_remote_device_states - This enumeration depicts all the states
182 * for the common remote device state machine. 181 * for the common remote device state machine.
182 * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
183 * state machine.
183 * 184 *
185 * @SCI_DEV_STOPPED: This state indicates that the remote device has
186 * successfully been stopped. In this state no new IO operations are
187 * permitted. This state is entered from the INITIAL state. This state
188 * is entered from the STOPPING state.
184 * 189 *
190 * @SCI_DEV_STARTING: This state indicates that the remote device is in
191 * the process of becoming ready (i.e. starting). In this state no new
192 * IO operations are permitted. This state is entered from the STOPPED
193 * state.
194 *
195 * @SCI_DEV_READY: This state indicates the remote device is now ready.
196 * Thus, the user is able to perform IO operations on the remote device.
197 * This state is entered from the STARTING state.
198 *
199 * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote
200 * device. When there are no active IO for the device it is in this
201 * state.
202 *
203 * @SCI_STP_DEV_CMD: This is the command state for the STP remote
204 * device. This state is entered when the device is processing a
205 * non-NCQ command. The device object will fail any new start IO
206 * requests until this command is complete.
207 *
208 * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
209 * This state is entered when the device is processing an NCQ request.
210 * It will remain in this state so long as there is one or more NCQ
211 * requests being processed.
212 *
213 * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
214 * remote device. This state is entered when an SDB error FIS is
215 * received by the device object while in the NCQ state. The device
216 * object will only accept a READ LOG command while in this state.
217 *
218 * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
219 * ATAPI remote device. This state is entered when the ATAPI device sends
220 * an error status FIS without data while the device object is in CMD
221 * state. A suspension event is expected in this state. The device
222 * object will resume right away.
223 *
224 * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates the
225 * device is waiting for a RESET task in order to recover from a
226 * hardware specific error.
227 *
228 * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
229 * remote device. This is the normal operational state for a remote
230 * device.
231 *
232 * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
233 * This is the state that the device is placed in when a RNC suspend is
234 * received by the SCU hardware.
235 *
236 * @SCI_DEV_STOPPING: This state indicates that the remote device is in
237 * the process of stopping. In this state no new IO operations are
238 * permitted, but existing IO operations are allowed to complete. This
239 * state is entered from the READY state. This state is entered from
240 * the FAILED state.
241 *
242 * @SCI_DEV_FAILED: This state indicates that the remote device has
243 * failed. In this state no new IO operations are permitted. This
244 * state is entered from the INITIALIZING state. This state is entered
245 * from the READY state.
246 *
247 * @SCI_DEV_RESETTING: This state indicates the device is being reset.
248 * In this state no new IO operations are permitted. This state is
249 * entered from the READY state.
250 *
251 * @SCI_DEV_FINAL: Simply the final state for the base remote device
252 * state machine.
185 */ 253 */
186enum sci_remote_device_states { 254#define REMOTE_DEV_STATES {\
187 /** 255 C(DEV_INITIAL),\
188 * Simply the initial state for the base remote device state machine. 256 C(DEV_STOPPED),\
189 */ 257 C(DEV_STARTING),\
190 SCI_DEV_INITIAL, 258 C(DEV_READY),\
191 259 C(STP_DEV_IDLE),\
192 /** 260 C(STP_DEV_CMD),\
193 * This state indicates that the remote device has successfully been 261 C(STP_DEV_NCQ),\
194 * stopped. In this state no new IO operations are permitted. 262 C(STP_DEV_NCQ_ERROR),\
195 * This state is entered from the INITIAL state. 263 C(STP_DEV_ATAPI_ERROR),\
196 * This state is entered from the STOPPING state. 264 C(STP_DEV_AWAIT_RESET),\
197 */ 265 C(SMP_DEV_IDLE),\
198 SCI_DEV_STOPPED, 266 C(SMP_DEV_CMD),\
199 267 C(DEV_STOPPING),\
200 /** 268 C(DEV_FAILED),\
201 * This state indicates the the remote device is in the process of 269 C(DEV_RESETTING),\
202 * becoming ready (i.e. starting). In this state no new IO operations 270 C(DEV_FINAL),\
203 * are permitted. 271 }
204 * This state is entered from the STOPPED state. 272#undef C
205 */ 273#define C(a) SCI_##a
206 SCI_DEV_STARTING, 274enum sci_remote_device_states REMOTE_DEV_STATES;
207 275#undef C
208 /** 276const char *dev_state_name(enum sci_remote_device_states state);
209 * This state indicates the remote device is now ready. Thus, the user
210 * is able to perform IO operations on the remote device.
211 * This state is entered from the STARTING state.
212 */
213 SCI_DEV_READY,
214
215 /**
216 * This is the idle substate for the stp remote device. When there are no
217 * active IO for the device it is is in this state.
218 */
219 SCI_STP_DEV_IDLE,
220
221 /**
222 * This is the command state for for the STP remote device. This state is
223 * entered when the device is processing a non-NCQ command. The device object
224 * will fail any new start IO requests until this command is complete.
225 */
226 SCI_STP_DEV_CMD,
227
228 /**
229 * This is the NCQ state for the STP remote device. This state is entered
230 * when the device is processing an NCQ reuqest. It will remain in this state
231 * so long as there is one or more NCQ requests being processed.
232 */
233 SCI_STP_DEV_NCQ,
234
235 /**
236 * This is the NCQ error state for the STP remote device. This state is
237 * entered when an SDB error FIS is received by the device object while in the
238 * NCQ state. The device object will only accept a READ LOG command while in
239 * this state.
240 */
241 SCI_STP_DEV_NCQ_ERROR,
242
243 /**
244 * This is the ATAPI error state for the STP ATAPI remote device.
245 * This state is entered when ATAPI device sends error status FIS
246 * without data while the device object is in CMD state.
247 * A suspension event is expected in this state.
248 * The device object will resume right away.
249 */
250 SCI_STP_DEV_ATAPI_ERROR,
251
252 /**
253 * This is the READY substate indicates the device is waiting for the RESET task
254 * coming to be recovered from certain hardware specific error.
255 */
256 SCI_STP_DEV_AWAIT_RESET,
257
258 /**
259 * This is the ready operational substate for the remote device. This is the
260 * normal operational state for a remote device.
261 */
262 SCI_SMP_DEV_IDLE,
263
264 /**
265 * This is the suspended state for the remote device. This is the state that
266 * the device is placed in when a RNC suspend is received by the SCU hardware.
267 */
268 SCI_SMP_DEV_CMD,
269
270 /**
271 * This state indicates that the remote device is in the process of
272 * stopping. In this state no new IO operations are permitted, but
273 * existing IO operations are allowed to complete.
274 * This state is entered from the READY state.
275 * This state is entered from the FAILED state.
276 */
277 SCI_DEV_STOPPING,
278
279 /**
280 * This state indicates that the remote device has failed.
281 * In this state no new IO operations are permitted.
282 * This state is entered from the INITIALIZING state.
283 * This state is entered from the READY state.
284 */
285 SCI_DEV_FAILED,
286
287 /**
288 * This state indicates the device is being reset.
289 * In this state no new IO operations are permitted.
290 * This state is entered from the READY state.
291 */
292 SCI_DEV_RESETTING,
293
294 /**
295 * Simply the final state for the base remote device state machine.
296 */
297 SCI_DEV_FINAL,
298};
299 277
300static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) 278static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
301{ 279{
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 748e8339d1ec..3a9463481f38 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -60,18 +60,15 @@
60#include "scu_event_codes.h" 60#include "scu_event_codes.h"
61#include "scu_task_context.h" 61#include "scu_task_context.h"
62 62
63#undef C
64#define C(a) (#a)
65const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
66{
67 static const char * const strings[] = RNC_STATES;
63 68
64/** 69 return strings[state];
65 * 70}
66 * @sci_rnc: The RNC for which the is posted request is being made. 71#undef C
67 *
68 * This method will return true if the RNC is not in the initial state. In all
69 * other states the RNC is considered active and this will return true. The
70 * destroy request of the state machine drives the RNC back to the initial
71 * state. If the state machine changes then this routine will also have to be
72 * changed. bool true if the state machine is not in the initial state false if
73 * the state machine is in the initial state
74 */
75 72
76/** 73/**
77 * 74 *
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index 41580ad12520..a241e0f4c865 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -85,61 +85,50 @@ struct sci_remote_node_context;
85typedef void (*scics_sds_remote_node_context_callback)(void *); 85typedef void (*scics_sds_remote_node_context_callback)(void *);
86 86
87/** 87/**
88 * This is the enumeration of the remote node context states. 88 * enum sci_remote_node_context_states
89 * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume
90 * request the remote node context will transition to the posting state.
91 *
92 * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
93 * the RNC is posted the remote node context will be made ready.
94 *
95 * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
96 * the hardware. Once the invalidate is complete the remote node context will
97 * transition to the posting state.
98 *
99 * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
100 * hardware. Once the event notification of resume complete is received the
101 * remote node context will transition to the ready state.
102 *
103 * @SCI_RNC_READY: state that the remote node context must be in to accept io
104 * request operations.
105 *
106 * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
107 * it gets a TX suspend notification from the hardware.
108 *
109 * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
110 * when it gets a TX RX suspend notification from the hardware.
111 *
112 * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
113 * for a suspend notification from the hardware. This state is entered when
114 * either there is a request to suspend the remote node context or when there is
115 * a TC completion where the remote node will be suspended by the hardware.
89 */ 116 */
90enum scis_sds_remote_node_context_states { 117#define RNC_STATES {\
91 /** 118 C(RNC_INITIAL),\
92 * This state is the initial state for a remote node context. On a resume 119 C(RNC_POSTING),\
93 * request the remote node context will transition to the posting state. 120 C(RNC_INVALIDATING),\
94 */ 121 C(RNC_RESUMING),\
95 SCI_RNC_INITIAL, 122 C(RNC_READY),\
96 123 C(RNC_TX_SUSPENDED),\
97 /** 124 C(RNC_TX_RX_SUSPENDED),\
98 * This is a transition state that posts the RNi to the hardware. Once the RNC 125 C(RNC_AWAIT_SUSPENSION),\
99 * is posted the remote node context will be made ready. 126 }
100 */ 127#undef C
101 SCI_RNC_POSTING, 128#define C(a) SCI_##a
102 129enum scis_sds_remote_node_context_states RNC_STATES;
103 /** 130#undef C
104 * This is a transition state that will post an RNC invalidate to the 131const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
105 * hardware. Once the invalidate is complete the remote node context will
106 * transition to the posting state.
107 */
108 SCI_RNC_INVALIDATING,
109
110 /**
111 * This is a transition state that will post an RNC resume to the hardare.
112 * Once the event notification of resume complete is received the remote node
113 * context will transition to the ready state.
114 */
115 SCI_RNC_RESUMING,
116
117 /**
118 * This is the state that the remote node context must be in to accept io
119 * request operations.
120 */
121 SCI_RNC_READY,
122
123 /**
124 * This is the state that the remote node context transitions to when it gets
125 * a TX suspend notification from the hardware.
126 */
127 SCI_RNC_TX_SUSPENDED,
128
129 /**
130 * This is the state that the remote node context transitions to when it gets
131 * a TX RX suspend notification from the hardware.
132 */
133 SCI_RNC_TX_RX_SUSPENDED,
134
135 /**
136 * This state is a wait state for the remote node context that waits for a
137 * suspend notification from the hardware. This state is entered when either
138 * there is a request to supend the remote node context or when there is a TC
139 * completion where the remote node will be suspended by the hardware.
140 */
141 SCI_RNC_AWAIT_SUSPENSION
142};
143 132
144/** 133/**
145 * 134 *
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ee0dc05c6269..2def1e3960f6 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -53,6 +53,7 @@
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56#include <scsi/scsi_cmnd.h>
56#include "isci.h" 57#include "isci.h"
57#include "task.h" 58#include "task.h"
58#include "request.h" 59#include "request.h"
@@ -60,6 +61,16 @@
60#include "scu_event_codes.h" 61#include "scu_event_codes.h"
61#include "sas.h" 62#include "sas.h"
62 63
64#undef C
65#define C(a) (#a)
66const char *req_state_name(enum sci_base_request_states state)
67{
68 static const char * const strings[] = REQUEST_STATES;
69
70 return strings[state];
71}
72#undef C
73
63static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 74static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
64 int idx) 75 int idx)
65{ 76{
@@ -264,6 +275,141 @@ static void scu_ssp_reqeust_construct_task_context(
264 task_context->response_iu_lower = lower_32_bits(dma_addr); 275 task_context->response_iu_lower = lower_32_bits(dma_addr);
265} 276}
266 277
278static u8 scu_bg_blk_size(struct scsi_device *sdp)
279{
280 switch (sdp->sector_size) {
281 case 512:
282 return 0;
283 case 1024:
284 return 1;
285 case 4096:
286 return 3;
287 default:
288 return 0xff;
289 }
290}
291
292static u32 scu_dif_bytes(u32 len, u32 sector_size)
293{
294 return (len >> ilog2(sector_size)) * 8;
295}
296
297static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
298{
299 struct scu_task_context *tc = ireq->tc;
300 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
301 u8 blk_sz = scu_bg_blk_size(scmd->device);
302
303 tc->block_guard_enable = 1;
304 tc->blk_prot_en = 1;
305 tc->blk_sz = blk_sz;
306 /* DIF write insert */
307 tc->blk_prot_func = 0x2;
308
309 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
310 scmd->device->sector_size);
311
312 /* always init to 0, used by hw */
313 tc->interm_crc_val = 0;
314
315 tc->init_crc_seed = 0;
316 tc->app_tag_verify = 0;
317 tc->app_tag_gen = 0;
318 tc->ref_tag_seed_verify = 0;
319
320 /* always init to same as bg_blk_sz */
321 tc->UD_bytes_immed_val = scmd->device->sector_size;
322
323 tc->reserved_DC_0 = 0;
324
325 /* always init to 8 */
326 tc->DIF_bytes_immed_val = 8;
327
328 tc->reserved_DC_1 = 0;
329 tc->bgc_blk_sz = scmd->device->sector_size;
330 tc->reserved_E0_0 = 0;
331 tc->app_tag_gen_mask = 0;
332
333 /** setup block guard control **/
334 tc->bgctl = 0;
335
336 /* DIF write insert */
337 tc->bgctl_f.op = 0x2;
338
339 tc->app_tag_verify_mask = 0;
340
341 /* must init to 0 for hw */
342 tc->blk_guard_err = 0;
343
344 tc->reserved_E8_0 = 0;
345
346 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
347 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
348 else if (type & SCSI_PROT_DIF_TYPE3)
349 tc->ref_tag_seed_gen = 0;
350}
351
352static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
353{
354 struct scu_task_context *tc = ireq->tc;
355 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
356 u8 blk_sz = scu_bg_blk_size(scmd->device);
357
358 tc->block_guard_enable = 1;
359 tc->blk_prot_en = 1;
360 tc->blk_sz = blk_sz;
361 /* DIF read strip */
362 tc->blk_prot_func = 0x1;
363
364 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
365 scmd->device->sector_size);
366
367 /* always init to 0, used by hw */
368 tc->interm_crc_val = 0;
369
370 tc->init_crc_seed = 0;
371 tc->app_tag_verify = 0;
372 tc->app_tag_gen = 0;
373
374 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
375 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
376 else if (type & SCSI_PROT_DIF_TYPE3)
377 tc->ref_tag_seed_verify = 0;
378
379 /* always init to same as bg_blk_sz */
380 tc->UD_bytes_immed_val = scmd->device->sector_size;
381
382 tc->reserved_DC_0 = 0;
383
384 /* always init to 8 */
385 tc->DIF_bytes_immed_val = 8;
386
387 tc->reserved_DC_1 = 0;
388 tc->bgc_blk_sz = scmd->device->sector_size;
389 tc->reserved_E0_0 = 0;
390 tc->app_tag_gen_mask = 0;
391
392 /** setup block guard control **/
393 tc->bgctl = 0;
394
395 /* DIF read strip */
396 tc->bgctl_f.crc_verify = 1;
397 tc->bgctl_f.op = 0x1;
398 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
399 tc->bgctl_f.ref_tag_chk = 1;
400 tc->bgctl_f.app_f_detect = 1;
401 } else if (type & SCSI_PROT_DIF_TYPE3)
402 tc->bgctl_f.app_ref_f_detect = 1;
403
404 tc->app_tag_verify_mask = 0;
405
406 /* must init to 0 for hw */
407 tc->blk_guard_err = 0;
408
409 tc->reserved_E8_0 = 0;
410 tc->ref_tag_seed_gen = 0;
411}
412
267/** 413/**
268 * This method will fill in the SCU Task Context for a SSP IO request. 414 * This method will fill in the SCU Task Context for a SSP IO request.
269 * @sci_req: 415 * @sci_req:
@@ -274,6 +420,10 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
274 u32 len) 420 u32 len)
275{ 421{
276 struct scu_task_context *task_context = ireq->tc; 422 struct scu_task_context *task_context = ireq->tc;
423 struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
424 struct scsi_cmnd *scmd = sas_task->uldd_task;
425 u8 prot_type = scsi_get_prot_type(scmd);
426 u8 prot_op = scsi_get_prot_op(scmd);
277 427
278 scu_ssp_reqeust_construct_task_context(ireq, task_context); 428 scu_ssp_reqeust_construct_task_context(ireq, task_context);
279 429
@@ -296,6 +446,13 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
296 446
297 if (task_context->transfer_length_bytes > 0) 447 if (task_context->transfer_length_bytes > 0)
298 sci_request_build_sgl(ireq); 448 sci_request_build_sgl(ireq);
449
450 if (prot_type != SCSI_PROT_DIF_TYPE0) {
451 if (prot_op == SCSI_PROT_READ_STRIP)
452 scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
453 else if (prot_op == SCSI_PROT_WRITE_INSERT)
454 scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
455 }
299} 456}
300 457
301/** 458/**
@@ -519,18 +676,12 @@ sci_io_request_construct_sata(struct isci_request *ireq,
519 if (test_bit(IREQ_TMF, &ireq->flags)) { 676 if (test_bit(IREQ_TMF, &ireq->flags)) {
520 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 677 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
521 678
522 if (tmf->tmf_code == isci_tmf_sata_srst_high || 679 dev_err(&ireq->owning_controller->pdev->dev,
523 tmf->tmf_code == isci_tmf_sata_srst_low) { 680 "%s: Request 0x%p received un-handled SAT "
524 scu_stp_raw_request_construct_task_context(ireq); 681 "management protocol 0x%x.\n",
525 return SCI_SUCCESS; 682 __func__, ireq, tmf->tmf_code);
526 } else {
527 dev_err(&ireq->owning_controller->pdev->dev,
528 "%s: Request 0x%p received un-handled SAT "
529 "management protocol 0x%x.\n",
530 __func__, ireq, tmf->tmf_code);
531 683
532 return SCI_FAILURE; 684 return SCI_FAILURE;
533 }
534 } 685 }
535 686
536 if (!sas_protocol_ata(task->task_proto)) { 687 if (!sas_protocol_ata(task->task_proto)) {
@@ -627,34 +778,6 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
627 return status; 778 return status;
628} 779}
629 780
630enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
631{
632 enum sci_status status = SCI_SUCCESS;
633
634 /* check for management protocols */
635 if (test_bit(IREQ_TMF, &ireq->flags)) {
636 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
637
638 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
639 tmf->tmf_code == isci_tmf_sata_srst_low) {
640 scu_stp_raw_request_construct_task_context(ireq);
641 } else {
642 dev_err(&ireq->owning_controller->pdev->dev,
643 "%s: Request 0x%p received un-handled SAT "
644 "Protocol 0x%x.\n",
645 __func__, ireq, tmf->tmf_code);
646
647 return SCI_FAILURE;
648 }
649 }
650
651 if (status != SCI_SUCCESS)
652 return status;
653 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
654
655 return status;
656}
657
658/** 781/**
659 * sci_req_tx_bytes - bytes transferred when reply underruns request 782 * sci_req_tx_bytes - bytes transferred when reply underruns request
660 * @ireq: request that was terminated early 783 * @ireq: request that was terminated early
@@ -756,9 +879,6 @@ sci_io_request_terminate(struct isci_request *ireq)
756 case SCI_REQ_STP_PIO_WAIT_FRAME: 879 case SCI_REQ_STP_PIO_WAIT_FRAME:
757 case SCI_REQ_STP_PIO_DATA_IN: 880 case SCI_REQ_STP_PIO_DATA_IN:
758 case SCI_REQ_STP_PIO_DATA_OUT: 881 case SCI_REQ_STP_PIO_DATA_OUT:
759 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
760 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
761 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
762 case SCI_REQ_ATAPI_WAIT_H2D: 882 case SCI_REQ_ATAPI_WAIT_H2D:
763 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 883 case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
764 case SCI_REQ_ATAPI_WAIT_D2H: 884 case SCI_REQ_ATAPI_WAIT_D2H:
@@ -800,7 +920,8 @@ enum sci_status sci_request_complete(struct isci_request *ireq)
800 920
801 state = ireq->sm.current_state_id; 921 state = ireq->sm.current_state_id;
802 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 922 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
803 "isci: request completion from wrong state (%d)\n", state)) 923 "isci: request completion from wrong state (%s)\n",
924 req_state_name(state)))
804 return SCI_FAILURE_INVALID_STATE; 925 return SCI_FAILURE_INVALID_STATE;
805 926
806 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 927 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
@@ -821,8 +942,8 @@ enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
821 state = ireq->sm.current_state_id; 942 state = ireq->sm.current_state_id;
822 943
823 if (state != SCI_REQ_STP_PIO_DATA_IN) { 944 if (state != SCI_REQ_STP_PIO_DATA_IN) {
824 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", 945 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
825 __func__, event_code, state); 946 __func__, event_code, req_state_name(state));
826 947
827 return SCI_FAILURE_INVALID_STATE; 948 return SCI_FAILURE_INVALID_STATE;
828 } 949 }
@@ -1938,59 +2059,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1938 return status; 2059 return status;
1939 } 2060 }
1940 2061
1941 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1942 struct dev_to_host_fis *frame_header;
1943 u32 *frame_buffer;
1944
1945 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1946 frame_index,
1947 (void **)&frame_header);
1948 if (status != SCI_SUCCESS) {
1949 dev_err(&ihost->pdev->dev,
1950 "%s: SCIC IO Request 0x%p could not get frame "
1951 "header for frame index %d, status %x\n",
1952 __func__,
1953 stp_req,
1954 frame_index,
1955 status);
1956 return status;
1957 }
1958
1959 switch (frame_header->fis_type) {
1960 case FIS_REGD2H:
1961 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1962 frame_index,
1963 (void **)&frame_buffer);
1964
1965 sci_controller_copy_sata_response(&ireq->stp.rsp,
1966 frame_header,
1967 frame_buffer);
1968
1969 /* The command has completed with error */
1970 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1971 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1972 break;
1973
1974 default:
1975 dev_warn(&ihost->pdev->dev,
1976 "%s: IO Request:0x%p Frame Id:%d protocol "
1977 "violation occurred\n",
1978 __func__,
1979 stp_req,
1980 frame_index);
1981
1982 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1983 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1984 break;
1985 }
1986
1987 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1988
1989 /* Frame has been decoded return it to the controller */
1990 sci_controller_release_frame(ihost, frame_index);
1991
1992 return status;
1993 }
1994 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2062 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
1995 struct sas_task *task = isci_request_access_task(ireq); 2063 struct sas_task *task = isci_request_access_task(ireq);
1996 2064
@@ -2088,57 +2156,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
2088 return status; 2156 return status;
2089} 2157}
2090 2158
2091static enum sci_status
2092stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
2093 u32 completion_code)
2094{
2095 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2097 ireq->scu_status = SCU_TASK_DONE_GOOD;
2098 ireq->sci_status = SCI_SUCCESS;
2099 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2100 break;
2101
2102 default:
2103 /*
2104 * All other completion status cause the IO to be complete.
2105 * If a NAK was received, then it is up to the user to retry
2106 * the request.
2107 */
2108 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2109 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2110 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2111 break;
2112 }
2113
2114 return SCI_SUCCESS;
2115}
2116
2117static enum sci_status
2118stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2119 u32 completion_code)
2120{
2121 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2122 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2123 ireq->scu_status = SCU_TASK_DONE_GOOD;
2124 ireq->sci_status = SCI_SUCCESS;
2125 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2126 break;
2127
2128 default:
2129 /* All other completion status cause the IO to be complete. If
2130 * a NAK was received, then it is up to the user to retry the
2131 * request.
2132 */
2133 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2134 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2135 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2136 break;
2137 }
2138
2139 return SCI_SUCCESS;
2140}
2141
2142static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, 2159static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2143 enum sci_base_request_states next) 2160 enum sci_base_request_states next)
2144{ 2161{
@@ -2284,14 +2301,6 @@ sci_io_request_tc_completion(struct isci_request *ireq,
2284 case SCI_REQ_STP_PIO_DATA_OUT: 2301 case SCI_REQ_STP_PIO_DATA_OUT:
2285 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2302 return pio_data_out_tx_done_tc_event(ireq, completion_code);
2286 2303
2287 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2288 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2289 completion_code);
2290
2291 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2292 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2293 completion_code);
2294
2295 case SCI_REQ_ABORTING: 2304 case SCI_REQ_ABORTING:
2296 return request_aborting_state_tc_event(ireq, 2305 return request_aborting_state_tc_event(ireq,
2297 completion_code); 2306 completion_code);
@@ -2308,12 +2317,8 @@ sci_io_request_tc_completion(struct isci_request *ireq,
2308 return atapi_data_tc_completion_handler(ireq, completion_code); 2317 return atapi_data_tc_completion_handler(ireq, completion_code);
2309 2318
2310 default: 2319 default:
2311 dev_warn(&ihost->pdev->dev, 2320 dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
2312 "%s: SCIC IO Request given task completion " 2321 __func__, completion_code, req_state_name(state));
2313 "notification %x while in wrong state %d\n",
2314 __func__,
2315 completion_code,
2316 state);
2317 return SCI_FAILURE_INVALID_STATE; 2322 return SCI_FAILURE_INVALID_STATE;
2318 } 2323 }
2319} 2324}
@@ -3065,10 +3070,6 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
3065 */ 3070 */
3066 if (!task && dev->dev_type == SAS_END_DEV) { 3071 if (!task && dev->dev_type == SAS_END_DEV) {
3067 state = SCI_REQ_TASK_WAIT_TC_COMP; 3072 state = SCI_REQ_TASK_WAIT_TC_COMP;
3068 } else if (!task &&
3069 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3070 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3071 state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
3072 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3073 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3073 state = SCI_REQ_SMP_WAIT_RESP; 3074 state = SCI_REQ_SMP_WAIT_RESP;
3074 } else if (task && sas_protocol_ata(task->task_proto) && 3075 } else if (task && sas_protocol_ata(task->task_proto) &&
@@ -3125,31 +3126,6 @@ static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_ba
3125 ireq->target_device->working_request = ireq; 3126 ireq->target_device->working_request = ireq;
3126} 3127}
3127 3128
3128static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3129{
3130 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3131
3132 ireq->target_device->working_request = ireq;
3133}
3134
3135static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3136{
3137 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3138 struct scu_task_context *tc = ireq->tc;
3139 struct host_to_dev_fis *h2d_fis;
3140 enum sci_status status;
3141
3142 /* Clear the SRST bit */
3143 h2d_fis = &ireq->stp.cmd;
3144 h2d_fis->control = 0;
3145
3146 /* Clear the TC control bit */
3147 tc->control_frame = 0;
3148
3149 status = sci_controller_continue_io(ireq);
3150 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3151}
3152
3153static const struct sci_base_state sci_request_state_table[] = { 3129static const struct sci_base_state sci_request_state_table[] = {
3154 [SCI_REQ_INIT] = { }, 3130 [SCI_REQ_INIT] = { },
3155 [SCI_REQ_CONSTRUCTED] = { }, 3131 [SCI_REQ_CONSTRUCTED] = { },
@@ -3168,13 +3144,6 @@ static const struct sci_base_state sci_request_state_table[] = {
3168 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3144 [SCI_REQ_STP_PIO_DATA_OUT] = { },
3169 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3145 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3170 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3146 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3171 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3172 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3173 },
3174 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3175 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3176 },
3177 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3178 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3147 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3179 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3148 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3180 [SCI_REQ_SMP_WAIT_RESP] = { }, 3149 [SCI_REQ_SMP_WAIT_RESP] = { },
@@ -3649,8 +3618,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3649 /* Cause this task to be scheduled in the SCSI error 3618 /* Cause this task to be scheduled in the SCSI error
3650 * handler thread. 3619 * handler thread.
3651 */ 3620 */
3652 isci_execpath_callback(ihost, task, 3621 sas_task_abort(task);
3653 sas_task_abort);
3654 3622
3655 /* Change the status, since we are holding 3623 /* Change the status, since we are holding
3656 * the I/O until it is managed by the SCSI 3624 * the I/O until it is managed by the SCSI
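
The transfer_length_bytes adjustment in scu_ssp_ireq_dif_insert()/scu_ssp_ireq_dif_strip() accounts for the 8 bytes of protection information carried per logical block: scu_dif_bytes() divides the transfer length by the power-of-two sector size (via ilog2()) and multiplies by 8. A standalone check of that arithmetic with example sizes, written as ordinary user-space C:

#include <stdio.h>

/* same arithmetic as scu_dif_bytes(): 8 PI bytes per sector */
static unsigned int dif_bytes(unsigned int len, unsigned int sector_size)
{
	return (len / sector_size) * 8;
}

int main(void)
{
	/* 64 KiB on 512-byte sectors: 128 sectors * 8 = 1024 extra bytes */
	printf("%u\n", dif_bytes(65536, 512));
	/* 64 KiB on 4096-byte sectors: 16 sectors * 8 = 128 extra bytes */
	printf("%u\n", dif_bytes(65536, 4096));
	return 0;
}
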
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index be38933dd6df..057f2378452d 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -182,138 +182,103 @@ static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
182} 182}
183 183
184/** 184/**
185 * enum sci_base_request_states - This enumeration depicts all the states for 185 * enum sci_base_request_states - request state machine states
186 * the common request state machine.
187 * 186 *
187 * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
188 * 188 *
189 * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
190 * constructed. This state is entered from the INITIAL state.
191 *
192 * @SCI_REQ_STARTED: This state indicates that the request has been started.
193 * This state is entered from the CONSTRUCTED state.
194 *
195 * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
196 * @SCI_REQ_STP_UDMA_WAIT_D2H:
197 * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
198 * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
199 *
200 * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
201 * waiting for the TC completion notification for the H2D Register FIS
202 *
203 * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
204 * waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame
205 * received is based on the result of the prior frame and line conditions.
206 *
207 * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
208 * waiting for a DATA frame from the device.
209 *
210 * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
211 * waiting to transmit the next data frame to the device.
212 *
213 * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
214 * waiting for the TC completion notification for the H2D Register FIS
215 *
216 * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
217 * waiting for a PIO Setup FIS.
218 *
219 * @SCI_REQ_ATAPI_WAIT_D2H: A non-data IO transitions to this state
220 * after receiving the TC completion. While in this state the IO request
221 * object is waiting for the D2H status frame as an UF.
222 *
223 * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
224 * task context completion after every frame submission, so in the
225 * non-accelerated case we need to expect the completion for the "cdb" frame.
226 *
227 * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
228 * the started raw task management request is waiting for the transmission of
229 * the initial frame (i.e. command, task, etc.).
230 *
231 * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
232 * management request is waiting for the reception of an unsolicited frame
233 * (i.e. response IU).
234 *
235 * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started task
236 * management request is waiting for the reception of an unsolicited frame
237 * (i.e. response IU).
238 *
239 * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
240 * the started SMP request is waiting for the transmission of the initial frame
241 * (i.e. command, task, etc.).
242 *
243 * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
244 * This state is entered from the STARTED state. This state is entered from the
245 * ABORTING state.
246 *
247 * @SCI_REQ_ABORTING: This state indicates that the request is in the process
248 * of being terminated/aborted. This state is entered from the CONSTRUCTED
249 * state. This state is entered from the STARTED state.
250 *
251 * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
189 */ 252 */
190enum sci_base_request_states { 253#define REQUEST_STATES {\
191 /* 254 C(REQ_INIT),\
192 * Simply the initial state for the base request state machine. 255 C(REQ_CONSTRUCTED),\
193 */ 256 C(REQ_STARTED),\
194 SCI_REQ_INIT, 257 C(REQ_STP_UDMA_WAIT_TC_COMP),\
195 258 C(REQ_STP_UDMA_WAIT_D2H),\
196 /* 259 C(REQ_STP_NON_DATA_WAIT_H2D),\
197 * This state indicates that the request has been constructed. 260 C(REQ_STP_NON_DATA_WAIT_D2H),\
198 * This state is entered from the INITIAL state. 261 C(REQ_STP_PIO_WAIT_H2D),\
199 */ 262 C(REQ_STP_PIO_WAIT_FRAME),\
200 SCI_REQ_CONSTRUCTED, 263 C(REQ_STP_PIO_DATA_IN),\
201 264 C(REQ_STP_PIO_DATA_OUT),\
202 /* 265 C(REQ_ATAPI_WAIT_H2D),\
203 * This state indicates that the request has been started. This state 266 C(REQ_ATAPI_WAIT_PIO_SETUP),\
204 * is entered from the CONSTRUCTED state. 267 C(REQ_ATAPI_WAIT_D2H),\
205 */ 268 C(REQ_ATAPI_WAIT_TC_COMP),\
206 SCI_REQ_STARTED, 269 C(REQ_TASK_WAIT_TC_COMP),\
207 270 C(REQ_TASK_WAIT_TC_RESP),\
208 SCI_REQ_STP_UDMA_WAIT_TC_COMP, 271 C(REQ_SMP_WAIT_RESP),\
209 SCI_REQ_STP_UDMA_WAIT_D2H, 272 C(REQ_SMP_WAIT_TC_COMP),\
210 273 C(REQ_COMPLETED),\
211 SCI_REQ_STP_NON_DATA_WAIT_H2D, 274 C(REQ_ABORTING),\
212 SCI_REQ_STP_NON_DATA_WAIT_D2H, 275 C(REQ_FINAL),\
213 276 }
214 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED, 277#undef C
215 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG, 278#define C(a) SCI_##a
216 SCI_REQ_STP_SOFT_RESET_WAIT_D2H, 279enum sci_base_request_states REQUEST_STATES;
217 280#undef C
218 /* 281const char *req_state_name(enum sci_base_request_states state);
219 * While in this state the IO request object is waiting for the TC
220 * completion notification for the H2D Register FIS
221 */
222 SCI_REQ_STP_PIO_WAIT_H2D,
223
224 /*
225 * While in this state the IO request object is waiting for either a
226 * PIO Setup FIS or a D2H register FIS. The type of frame received is
227 * based on the result of the prior frame and line conditions.
228 */
229 SCI_REQ_STP_PIO_WAIT_FRAME,
230
231 /*
232 * While in this state the IO request object is waiting for a DATA
233 * frame from the device.
234 */
235 SCI_REQ_STP_PIO_DATA_IN,
236
237 /*
238 * While in this state the IO request object is waiting to transmit
239 * the next data frame to the device.
240 */
241 SCI_REQ_STP_PIO_DATA_OUT,
242
243 /*
244 * While in this state the IO request object is waiting for the TC
245 * completion notification for the H2D Register FIS
246 */
247 SCI_REQ_ATAPI_WAIT_H2D,
248
249 /*
250 * While in this state the IO request object is waiting for either a
251 * PIO Setup.
252 */
253 SCI_REQ_ATAPI_WAIT_PIO_SETUP,
254
255 /*
256 * The non-data IO transit to this state in this state after receiving
257 * TC completion. While in this state IO request object is waiting for
258 * D2H status frame as UF.
259 */
260 SCI_REQ_ATAPI_WAIT_D2H,
261
262 /*
263 * When transmitting raw frames hardware reports task context completion
264 * after every frame submission, so in the non-accelerated case we need
265 * to expect the completion for the "cdb" frame.
266 */
267 SCI_REQ_ATAPI_WAIT_TC_COMP,
268
269 /*
270 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
271 * task management request is waiting for the transmission of the
272 * initial frame (i.e. command, task, etc.).
273 */
274 SCI_REQ_TASK_WAIT_TC_COMP,
275
276 /*
277 * This sub-state indicates that the started task management request
278 * is waiting for the reception of an unsolicited frame
279 * (i.e. response IU).
280 */
281 SCI_REQ_TASK_WAIT_TC_RESP,
282
283 /*
284 * This sub-state indicates that the started task management request
285 * is waiting for the reception of an unsolicited frame
286 * (i.e. response IU).
287 */
288 SCI_REQ_SMP_WAIT_RESP,
289
290 /*
291 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
292 * request is waiting for the transmission of the initial frame
293 * (i.e. command, task, etc.).
294 */
295 SCI_REQ_SMP_WAIT_TC_COMP,
296
297 /*
298 * This state indicates that the request has completed.
299 * This state is entered from the STARTED state. This state is entered
300 * from the ABORTING state.
301 */
302 SCI_REQ_COMPLETED,
303
304 /*
305 * This state indicates that the request is in the process of being
306 * terminated/aborted.
307 * This state is entered from the CONSTRUCTED state.
308 * This state is entered from the STARTED state.
309 */
310 SCI_REQ_ABORTING,
311
312 /*
313 * Simply the final state for the base request state machine.
314 */
315 SCI_REQ_FINAL,
316};
317 282
318enum sci_status sci_request_start(struct isci_request *ireq); 283enum sci_status sci_request_start(struct isci_request *ireq);
319enum sci_status sci_io_request_terminate(struct isci_request *ireq); 284enum sci_status sci_io_request_terminate(struct isci_request *ireq);
@@ -446,10 +411,7 @@ sci_task_request_construct(struct isci_host *ihost,
446 struct isci_remote_device *idev, 411 struct isci_remote_device *idev,
447 u16 io_tag, 412 u16 io_tag,
448 struct isci_request *ireq); 413 struct isci_request *ireq);
449enum sci_status 414enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
450sci_task_request_construct_ssp(struct isci_request *ireq);
451enum sci_status
452sci_task_request_construct_sata(struct isci_request *ireq);
453void sci_smp_request_copy_response(struct isci_request *ireq); 415void sci_smp_request_copy_response(struct isci_request *ireq);
454 416
455static inline int isci_task_is_ncq_recovery(struct sas_task *task) 417static inline int isci_task_is_ncq_recovery(struct sas_task *task)
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
index 7df87d923285..869a979eb5b2 100644
--- a/drivers/scsi/isci/scu_task_context.h
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -866,9 +866,9 @@ struct scu_task_context {
866 struct transport_snapshot snapshot; /* read only set to 0 */ 866 struct transport_snapshot snapshot; /* read only set to 0 */
867 867
868 /* OFFSET 0x5C */ 868 /* OFFSET 0x5C */
869 u32 block_protection_enable:1; 869 u32 blk_prot_en:1;
870 u32 block_size:2; 870 u32 blk_sz:2;
871 u32 block_protection_function:2; 871 u32 blk_prot_func:2;
872 u32 reserved_5C_0:9; 872 u32 reserved_5C_0:9;
873 u32 active_sgl_element:2; /* read only set to 0 */ 873 u32 active_sgl_element:2; /* read only set to 0 */
874 u32 sgl_exhausted:1; /* read only set to 0 */ 874 u32 sgl_exhausted:1; /* read only set to 0 */
@@ -896,33 +896,56 @@ struct scu_task_context {
896 u32 reserved_C4_CC[3]; 896 u32 reserved_C4_CC[3];
897 897
898 /* OFFSET 0xD0 */ 898 /* OFFSET 0xD0 */
899 u32 intermediate_crc_value:16; 899 u32 interm_crc_val:16;
900 u32 initial_crc_seed:16; 900 u32 init_crc_seed:16;
901 901
902 /* OFFSET 0xD4 */ 902 /* OFFSET 0xD4 */
903 u32 application_tag_for_verify:16; 903 u32 app_tag_verify:16;
904 u32 application_tag_for_generate:16; 904 u32 app_tag_gen:16;
905 905
906 /* OFFSET 0xD8 */ 906 /* OFFSET 0xD8 */
907 u32 reference_tag_seed_for_verify_function; 907 u32 ref_tag_seed_verify;
908 908
909 /* OFFSET 0xDC */ 909 /* OFFSET 0xDC */
910 u32 reserved_DC; 910 u32 UD_bytes_immed_val:13;
911 u32 reserved_DC_0:3;
912 u32 DIF_bytes_immed_val:4;
913 u32 reserved_DC_1:12;
911 914
912 /* OFFSET 0xE0 */ 915 /* OFFSET 0xE0 */
913 u32 reserved_E0_0:16; 916 u32 bgc_blk_sz:13;
914 u32 application_tag_mask_for_generate:16; 917 u32 reserved_E0_0:3;
918 u32 app_tag_gen_mask:16;
915 919
916 /* OFFSET 0xE4 */ 920 /* OFFSET 0xE4 */
917 u32 block_protection_control:16; 921 union {
918 u32 application_tag_mask_for_verify:16; 922 u16 bgctl;
923 struct {
924 u16 crc_verify:1;
925 u16 app_tag_chk:1;
926 u16 ref_tag_chk:1;
927 u16 op:2;
928 u16 legacy:1;
929 u16 invert_crc_seed:1;
930 u16 ref_tag_gen:1;
931 u16 fixed_ref_tag:1;
932 u16 invert_crc:1;
933 u16 app_ref_f_detect:1;
934 u16 uninit_dif_check_err:1;
935 u16 uninit_dif_bypass:1;
936 u16 app_f_detect:1;
937 u16 reserved_0:2;
938 } bgctl_f;
939 };
940
941 u16 app_tag_verify_mask;
919 942
920 /* OFFSET 0xE8 */ 943 /* OFFSET 0xE8 */
921 u32 block_protection_error:8; 944 u32 blk_guard_err:8;
922 u32 reserved_E8_0:24; 945 u32 reserved_E8_0:24;
923 946
924 /* OFFSET 0xEC */ 947 /* OFFSET 0xEC */
925 u32 reference_tag_seed_for_verify; 948 u32 ref_tag_seed_gen;
926 949
927 /* OFFSET 0xF0 */ 950 /* OFFSET 0xF0 */
928 u32 intermediate_crc_valid_snapshot:16; 951 u32 intermediate_crc_valid_snapshot:16;
@@ -937,6 +960,6 @@ struct scu_task_context {
937 /* OFFSET 0xFC */ 960 /* OFFSET 0xFC */
938 u32 reference_tag_seed_for_generate_function_snapshot; 961 u32 reference_tag_seed_for_generate_function_snapshot;
939 962
940}; 963} __packed;
941 964
942#endif /* _SCU_TASK_CONTEXT_H_ */ 965#endif /* _SCU_TASK_CONTEXT_H_ */
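The renamed protection fields above expose the block-guard control word both as a raw
u16 (bgctl) and as individual bits (bgctl_f). A minimal sketch of how a caller might
program them, using a hypothetical scu_tc_enable_block_guard() helper; only the field
names come from the structure above, and the particular values written are illustrative
assumptions, not hardware-documented settings:

#include "scu_task_context.h"

static void scu_tc_enable_block_guard(struct scu_task_context *tc)
{
	tc->bgctl = 0;			/* clear the raw 16-bit view first       */
	tc->bgctl_f.crc_verify  = 1;	/* verify the CRC guard tag              */
	tc->bgctl_f.app_tag_chk = 1;	/* check the application tag             */
	tc->bgctl_f.ref_tag_chk = 1;	/* check the reference tag               */

	tc->blk_prot_en = 1;		/* enable block protection for this TC   */
	tc->blk_sz = 0;			/* block-size encoding is hardware-defined (assumed) */

	tc->app_tag_verify = 0xffff;	/* tag/seed values illustrative only     */
	tc->ref_tag_seed_verify = 0;
}
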
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index f5a3f7d2bdab..374254ede9d4 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -96,8 +96,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
96 __func__, task, response, status); 96 __func__, task, response, status);
97 97
98 task->lldd_task = NULL; 98 task->lldd_task = NULL;
99 99 task->task_done(task);
100 isci_execpath_callback(ihost, task, task->task_done);
101 break; 100 break;
102 101
103 case isci_perform_aborted_io_completion: 102 case isci_perform_aborted_io_completion:
@@ -117,8 +116,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
117 "%s: Error - task = %p, response=%d, " 116 "%s: Error - task = %p, response=%d, "
118 "status=%d\n", 117 "status=%d\n",
119 __func__, task, response, status); 118 __func__, task, response, status);
120 119 sas_task_abort(task);
121 isci_execpath_callback(ihost, task, sas_task_abort);
122 break; 120 break;
123 121
124 default: 122 default:
@@ -249,46 +247,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
249 return 0; 247 return 0;
250} 248}
251 249
252static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
253{
254 struct isci_tmf *isci_tmf;
255 enum sci_status status;
256
257 if (!test_bit(IREQ_TMF, &ireq->flags))
258 return SCI_FAILURE;
259
260 isci_tmf = isci_request_access_tmf(ireq);
261
262 switch (isci_tmf->tmf_code) {
263
264 case isci_tmf_sata_srst_high:
265 case isci_tmf_sata_srst_low: {
266 struct host_to_dev_fis *fis = &ireq->stp.cmd;
267
268 memset(fis, 0, sizeof(*fis));
269
270 fis->fis_type = 0x27;
271 fis->flags &= ~0x80;
272 fis->flags &= 0xF0;
273 if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
274 fis->control |= ATA_SRST;
275 else
276 fis->control &= ~ATA_SRST;
277 break;
278 }
279 /* other management commands go here... */
280 default:
281 return SCI_FAILURE;
282 }
283
284 /* core builds the protocol specific request
285 * based on the h2d fis.
286 */
287 status = sci_task_request_construct_sata(ireq);
288
289 return status;
290}
291
292static struct isci_request *isci_task_request_build(struct isci_host *ihost, 250static struct isci_request *isci_task_request_build(struct isci_host *ihost,
293 struct isci_remote_device *idev, 251 struct isci_remote_device *idev,
294 u16 tag, struct isci_tmf *isci_tmf) 252 u16 tag, struct isci_tmf *isci_tmf)
@@ -328,13 +286,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
328 return NULL; 286 return NULL;
329 } 287 }
330 288
331 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
332 isci_tmf->proto = SAS_PROTOCOL_SATA;
333 status = isci_sata_management_task_request_build(ireq);
334
335 if (status != SCI_SUCCESS)
336 return NULL;
337 }
338 return ireq; 289 return ireq;
339} 290}
340 291
@@ -873,53 +824,20 @@ static int isci_task_send_lu_reset_sas(
873 return ret; 824 return ret;
874} 825}
875 826
876static int isci_task_send_lu_reset_sata(struct isci_host *ihost, 827int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
877 struct isci_remote_device *idev, u8 *lun)
878{
879 int ret = TMF_RESP_FUNC_FAILED;
880 struct isci_tmf tmf;
881
882 /* Send the soft reset to the target */
883 #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
884 isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
885
886 ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
887
888 if (ret != TMF_RESP_FUNC_COMPLETE) {
889 dev_dbg(&ihost->pdev->dev,
890 "%s: Assert SRST failed (%p) = %x",
891 __func__, idev, ret);
892
893 /* Return the failure so that the LUN reset is escalated
894 * to a target reset.
895 */
896 }
897 return ret;
898}
899
900/**
901 * isci_task_lu_reset() - This function is one of the SAS Domain Template
902 * functions, a Task Management function called by libsas,
903 * to reset the given lun. Note the assumption that while this call is
904 * executing, no I/O will be sent by the host to the device.
905 * @lun: This parameter specifies the lun to be reset.
906 *
907 * status, zero indicates success.
908 */
909int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
910{ 828{
911 struct isci_host *isci_host = dev_to_ihost(domain_device); 829 struct isci_host *isci_host = dev_to_ihost(dev);
912 struct isci_remote_device *isci_device; 830 struct isci_remote_device *isci_device;
913 unsigned long flags; 831 unsigned long flags;
914 int ret; 832 int ret;
915 833
916 spin_lock_irqsave(&isci_host->scic_lock, flags); 834 spin_lock_irqsave(&isci_host->scic_lock, flags);
917 isci_device = isci_lookup_device(domain_device); 835 isci_device = isci_lookup_device(dev);
918 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 836 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
919 837
920 dev_dbg(&isci_host->pdev->dev, 838 dev_dbg(&isci_host->pdev->dev,
921 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", 839 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
922 __func__, domain_device, isci_host, isci_device); 840 __func__, dev, isci_host, isci_device);
923 841
924 if (!isci_device) { 842 if (!isci_device) {
925 /* If the device is gone, stop the escalations. */ 843 /* If the device is gone, stop the escalations. */
@@ -928,11 +846,11 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
928 ret = TMF_RESP_FUNC_COMPLETE; 846 ret = TMF_RESP_FUNC_COMPLETE;
929 goto out; 847 goto out;
930 } 848 }
931 set_bit(IDEV_EH, &isci_device->flags);
932 849
933 /* Send the task management part of the reset. */ 850 /* Send the task management part of the reset. */
934 if (sas_protocol_ata(domain_device->tproto)) { 851 if (dev_is_sata(dev)) {
935 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun); 852 sas_ata_schedule_reset(dev);
853 ret = TMF_RESP_FUNC_COMPLETE;
936 } else 854 } else
937 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); 855 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
938 856
@@ -1062,9 +980,6 @@ int isci_task_abort_task(struct sas_task *task)
1062 "%s: dev = %p, task = %p, old_request == %p\n", 980 "%s: dev = %p, task = %p, old_request == %p\n",
1063 __func__, isci_device, task, old_request); 981 __func__, isci_device, task, old_request);
1064 982
1065 if (isci_device)
1066 set_bit(IDEV_EH, &isci_device->flags);
1067
1068 /* Device reset conditions signalled in task_state_flags are the 983 /* Device reset conditions signalled in task_state_flags are the
1069 * responsibility of libsas to observe at the start of the error 984
1070 * handler thread. 985 * handler thread.
@@ -1332,29 +1247,35 @@ isci_task_request_complete(struct isci_host *ihost,
1332} 1247}
1333 1248
1334static int isci_reset_device(struct isci_host *ihost, 1249static int isci_reset_device(struct isci_host *ihost,
1250 struct domain_device *dev,
1335 struct isci_remote_device *idev) 1251 struct isci_remote_device *idev)
1336{ 1252{
1337 struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1338 enum sci_status status;
1339 unsigned long flags;
1340 int rc; 1253 int rc;
1254 unsigned long flags;
1255 enum sci_status status;
1256 struct sas_phy *phy = sas_get_local_phy(dev);
1257 struct isci_port *iport = dev->port->lldd_port;
1341 1258
1342 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); 1259 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1343 1260
1344 spin_lock_irqsave(&ihost->scic_lock, flags); 1261 spin_lock_irqsave(&ihost->scic_lock, flags);
1345 status = sci_remote_device_reset(idev); 1262 status = sci_remote_device_reset(idev);
1346 if (status != SCI_SUCCESS) { 1263 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1347 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1348 1264
1265 if (status != SCI_SUCCESS) {
1349 dev_dbg(&ihost->pdev->dev, 1266 dev_dbg(&ihost->pdev->dev,
1350 "%s: sci_remote_device_reset(%p) returned %d!\n", 1267 "%s: sci_remote_device_reset(%p) returned %d!\n",
1351 __func__, idev, status); 1268 __func__, idev, status);
1352 1269 rc = TMF_RESP_FUNC_FAILED;
1353 return TMF_RESP_FUNC_FAILED; 1270 goto out;
1354 } 1271 }
1355 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1356 1272
1357 rc = sas_phy_reset(phy, true); 1273 if (scsi_is_sas_phy_local(phy)) {
1274 struct isci_phy *iphy = &ihost->phys[phy->number];
1275
1276 rc = isci_port_perform_hard_reset(ihost, iport, iphy);
1277 } else
1278 rc = sas_phy_reset(phy, !dev_is_sata(dev));
1358 1279
1359 /* Terminate in-progress I/O now. */ 1280 /* Terminate in-progress I/O now. */
1360 isci_remote_device_nuke_requests(ihost, idev); 1281 isci_remote_device_nuke_requests(ihost, idev);
@@ -1371,7 +1292,8 @@ static int isci_reset_device(struct isci_host *ihost,
1371 } 1292 }
1372 1293
1373 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); 1294 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1374 1295 out:
1296 sas_put_local_phy(phy);
1375 return rc; 1297 return rc;
1376} 1298}
1377 1299
@@ -1386,35 +1308,15 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
1386 idev = isci_lookup_device(dev); 1308 idev = isci_lookup_device(dev);
1387 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1309 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1388 1310
1389 if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1390 ret = TMF_RESP_FUNC_COMPLETE;
1391 goto out;
1392 }
1393
1394 ret = isci_reset_device(ihost, idev);
1395 out:
1396 isci_put_device(idev);
1397 return ret;
1398}
1399
1400int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1401{
1402 struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1403 struct isci_host *ihost = dev_to_ihost(dev);
1404 struct isci_remote_device *idev;
1405 unsigned long flags;
1406 int ret;
1407
1408 spin_lock_irqsave(&ihost->scic_lock, flags);
1409 idev = isci_lookup_device(dev);
1410 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1411
1412 if (!idev) { 1311 if (!idev) {
1312 /* XXX: need to cleanup any ireqs targeting this
1313 * domain_device
1314 */
1413 ret = TMF_RESP_FUNC_COMPLETE; 1315 ret = TMF_RESP_FUNC_COMPLETE;
1414 goto out; 1316 goto out;
1415 } 1317 }
1416 1318
1417 ret = isci_reset_device(ihost, idev); 1319 ret = isci_reset_device(ihost, dev, idev);
1418 out: 1320 out:
1419 isci_put_device(idev); 1321 isci_put_device(idev);
1420 return ret; 1322 return ret;
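The reworked reset path above pins the device's local phy with sas_get_local_phy() and
releases it on every exit with sas_put_local_phy(); expander-attached devices go through
the transport class, with a hard reset only when the target is not SATA. A condensed
sketch of that get/reset/put pattern (the function name is illustrative, and the
LLDD-specific local-phy branch from the hunk above is omitted):

#include <scsi/libsas.h>
#include <scsi/sas_ata.h>	/* dev_is_sata() */

static int reset_dev_via_transport(struct domain_device *dev)
{
	struct sas_phy *phy = sas_get_local_phy(dev);	/* takes a phy reference */
	int rc;

	/* Hard reset unless the attached device is SATA. */
	rc = sas_phy_reset(phy, !dev_is_sata(dev));

	sas_put_local_phy(phy);				/* drop the reference    */
	return rc;
}
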
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 1b27b3797c6c..7b6d0e32fd9b 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -86,8 +86,6 @@ enum isci_tmf_function_codes {
86 isci_tmf_func_none = 0, 86 isci_tmf_func_none = 0,
87 isci_tmf_ssp_task_abort = TMF_ABORT_TASK, 87 isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
88 isci_tmf_ssp_lun_reset = TMF_LU_RESET, 88 isci_tmf_ssp_lun_reset = TMF_LU_RESET,
89 isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
90 isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
91}; 89};
92/** 90/**
93 * struct isci_tmf - This class represents the task management object which 91 * struct isci_tmf - This class represents the task management object which
@@ -210,8 +208,6 @@ int isci_queuecommand(
210 struct scsi_cmnd *scsi_cmd, 208 struct scsi_cmnd *scsi_cmd,
211 void (*donefunc)(struct scsi_cmnd *)); 209 void (*donefunc)(struct scsi_cmnd *));
212 210
213int isci_bus_reset_handler(struct scsi_cmnd *cmd);
214
215/** 211/**
216 * enum isci_completion_selection - This enum defines the possible actions to 212 * enum isci_completion_selection - This enum defines the possible actions to
217 * take with respect to a given request's notification back to libsas. 213 * take with respect to a given request's notification back to libsas.
@@ -321,40 +317,4 @@ isci_task_set_completion_status(
321 return task_notification_selection; 317 return task_notification_selection;
322 318
323} 319}
324/**
325* isci_execpath_callback() - This function is called from the task
326* execute path when the task needs to callback libsas about the submit-time
327* task failure. The callback occurs either through the task's done function
328* or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
329* requests, libsas takes the host lock before calling execute task. Therefore
330* in this situation the host lock must be managed before calling the func.
331*
332* @ihost: This parameter is the controller to which the I/O request was sent.
333* @task: This parameter is the I/O request.
334* @func: This parameter is the function to call in the correct context.
335* @status: This parameter is the status code for the completed task.
336*
337*/
338static inline void isci_execpath_callback(struct isci_host *ihost,
339 struct sas_task *task,
340 void (*func)(struct sas_task *))
341{
342 struct domain_device *dev = task->dev;
343
344 if (dev_is_sata(dev) && task->uldd_task) {
345 unsigned long flags;
346
347 /* Since we are still in the submit path, and since
348 * libsas takes the host lock on behalf of SATA
349 * devices before I/O starts (in the non-discovery case),
350 * we need to unlock before we can call the callback function.
351 */
352 raw_local_irq_save(flags);
353 spin_unlock(dev->sata_dev.ap->lock);
354 func(task);
355 spin_lock(dev->sata_dev.ap->lock);
356 raw_local_irq_restore(flags);
357 } else
358 func(task);
359}
360#endif /* !defined(_SCI_TASK_H_) */ 320#endif /* !defined(_SCI_TASK_H_) */
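With isci_execpath_callback() removed, a submit-time refusal now calls back into libsas
directly, as the task.c hunk earlier shows. A minimal sketch of that pattern; the function
name is illustrative, and filling in task->task_status before the callback is assumed to
have been done by the caller:

#include <scsi/libsas.h>

static void refuse_task_to_libsas(struct sas_task *task)
{
	task->lldd_task = NULL;		/* sever the LLDD back-pointer           */
	task->task_done(task);		/* complete to libsas in this context    */
}
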